Commit c5b3f01

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent 3588ec9 commit c5b3f01

2 files changed: +41 -42 lines
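
These are mechanical restyles from the repository's pre-commit hooks, consistent with an updated formatter such as Black (the hook versions are not shown on this page, so that attribution is an assumption). Two patterns recur in the diff below: a long assignment to a subscripted target is now wrapped by parenthesizing the right-hand side instead of splitting the subscript, and redundant parentheses around tuple targets in for loops are dropped. A minimal sketch of both, with illustrative names that do not come from this commit:

    # Hypothetical names; a sketch of the two formatting patterns only.
    params = {}
    suffix = "frontend"

    # Old wrapping: the subscripted target itself was split across lines.
    # params[
    #     "Name"
    # ] = f"id_{suffix}"
    # New wrapping: the target stays on one line; the right-hand side is
    # parenthesized so it can break onto its own line.
    params["Name"] = (
        f"id_{suffix}"
    )

    # Old: for (key, val) in [("a", 1)]:  (the parentheses are redundant)
    for key, val in [("a", 1)]:
        params[key] = val

Neither pattern changes runtime behavior; both forms compile to identical code.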

src/decisionengine_modules/NERSC/sources/NerscAllocationInfo.py

Lines changed: 0 additions & 1 deletion
@@ -36,7 +36,6 @@
 )
 @Source.produces(Nersc_Allocation_Info=pd.DataFrame)
 class NerscAllocationInfo(Source.Source):
-
     """
     Information of allocations on NERSC machines
     """

src/decisionengine_modules/glideinwms/glide_frontend_element.py

Lines changed: 41 additions & 41 deletions
@@ -520,9 +520,9 @@ def create_glideclient_classads(
         my_name = f"{self.frontend_name}.{self.fe_group}"
         gc_classad = classads.GlideClientClassad(glidein_name, my_name)
         # Make the classad name unique by adding credential id to it
-        gc_classad.adParams[
-            "Name"
-        ] = f"{self.file_id_cache.file_id(cred, cred.filename)}_{gc_classad.adParams['Name']}"
+        gc_classad.adParams["Name"] = (
+            f"{self.file_id_cache.file_id(cred, cred.filename)}_{gc_classad.adParams['Name']}"
+        )
         gc_classad.adParams["CollectorHost"] = factory_pool
         gc_classad.adParams["FrontendName"] = self.frontend_name
         gc_classad.adParams["GroupName"] = self.fe_group
@@ -670,7 +670,7 @@ def create_glideclientglobal_classads(self, glidefactory_classad, key_builder):
         }
         for cred in credentials:
             if cred.advertize:
-                for (fname, data) in cred.loaded_data:
+                for fname, data in cred.loaded_data:
                     classad_attrs_to_encrypt[cred.file_id(fname)] = data
                 if hasattr(cred, "security_class"):
                     # Convert security_class to string for factory
@@ -946,7 +946,7 @@ def count_match(self, job_types, job_type, entries):
         # Get group of jobs based on request cpus
         job_groups = jobs.groupby("RequestCpus")
 
-        for (req_cpus, group) in job_groups:
+        for req_cpus, group in job_groups:
             # Group jobs by matching criteria: RequestCpus for now
             # We care about job counts for each group
             job_count = len(group)
@@ -1073,31 +1073,31 @@ def identify_limits_triggered(
 
         # Identify the limits triggered for advertising in glideresource
         if count_status["Total"] >= self.entry_max_glideins:  # max_running
-            limits_triggered[
-                "TotalGlideinsPerEntry"
-            ] = f"count={count_status['Total']}, limit={self.entry_max_glideins,}"
+            limits_triggered["TotalGlideinsPerEntry"] = (
+                f"count={count_status['Total']}, limit={self.entry_max_glideins,}"
+            )
         if count_status["Idle"] >= self.entry_max_slots_idle:  # max_vms_idle
-            limits_triggered[
-                "IdleGlideinsPerEntry"
-            ] = f"count={count_status['Idle']}, limit={self.entry_max_slots_idle}"
+            limits_triggered["IdleGlideinsPerEntry"] = (
+                f"count={count_status['Idle']}, limit={self.entry_max_slots_idle}"
+            )
         if total_glideins >= self.total_max_slots:  # was total_max_glideins
             limits_triggered["TotalGlideinsPerGroup"] = f"count={total_glideins}, limit={self.total_max_slots}"
         if total_idle_glideins >= self.total_max_slots_idle:  # was total_max_vms_idle
             limits_triggered["IdleGlideinsPerGroup"] = f"count={total_idle_glideins}, limit={self.total_max_slots_idle}"
         if fe_total_glideins >= self.fe_total_max_slots:  # fe_total_max_glideins
             limits_triggered["TotalGlideinsPerFrontend"] = f"count={fe_total_glideins}, limit={self.fe_total_max_slots}"
         if fe_total_idle_glideins >= self.fe_total_max_slots_idle:  # fe_total_max_vms_idle
-            limits_triggered[
-                "IdleGlideinsPerFrontend"
-            ] = f"count={fe_total_idle_glideins}, limit={self.fe_total_max_slots_idle}"
+            limits_triggered["IdleGlideinsPerFrontend"] = (
+                f"count={fe_total_idle_glideins}, limit={self.fe_total_max_slots_idle}"
+            )
         if global_total_glideins >= self.global_total_max_slots:  # global_total_max_glideins
-            limits_triggered[
-                "TotalGlideinsGlobal"
-            ] = f"count={global_total_glideins}, limit={self.global_total_max_slots}"
+            limits_triggered["TotalGlideinsGlobal"] = (
+                f"count={global_total_glideins}, limit={self.global_total_max_slots}"
+            )
         if global_total_idle_glideins >= self.global_total_max_slots_idle:  # global_total_max_vms_idle
-            limits_triggered[
-                "IdleGlideinsGlobal"
-            ] = f"count={global_total_idle_glideins}, limit={self.global_total_max_slots_idle}"
+            limits_triggered["IdleGlideinsGlobal"] = (
+                f"count={global_total_idle_glideins}, limit={self.global_total_max_slots_idle}"
+            )
 
     def compute_glidein_min_idle(
         self,
@@ -1178,37 +1178,37 @@ def compute_glidein_min_idle(
 
         if count_status["Idle"] >= self.entry_curb_slots_idle:
             glidein_min_idle /= 2  # above first treshold, reduce
-            limits_triggered[
-                "CurbIdleGlideinsPerEntry"
-            ] = f"count={count_status['Idle']}, curb={self.entry_curb_slots_idle,}"
+            limits_triggered["CurbIdleGlideinsPerEntry"] = (
+                f"count={count_status['Idle']}, curb={self.entry_curb_slots_idle,}"
+            )
         if total_glideins >= self.total_curb_slots:
             glidein_min_idle /= 2  # above global treshold, reduce further
             limits_triggered["CurbTotalGlideinsPerGroup"] = f"count={total_glideins}, curb={self.total_curb_slots}"
         if total_idle_glideins >= self.total_curb_slots_idle:
             glidein_min_idle /= 2  # above global treshold, reduce further
-            limits_triggered[
-                "CurbIdleGlideinsPerGroup"
-            ] = f"count={total_idle_glideins}, curb={self.total_curb_slots_idle}"
+            limits_triggered["CurbIdleGlideinsPerGroup"] = (
+                f"count={total_idle_glideins}, curb={self.total_curb_slots_idle}"
+            )
         if fe_total_glideins >= self.fe_total_curb_slots:
             glidein_min_idle /= 2  # above global treshold, reduce further
-            limits_triggered[
-                "CurbTotalGlideinsPerFrontend"
-            ] = f"count={fe_total_glideins}, curb={self.fe_total_curb_slots}"
+            limits_triggered["CurbTotalGlideinsPerFrontend"] = (
+                f"count={fe_total_glideins}, curb={self.fe_total_curb_slots}"
+            )
         if fe_total_idle_glideins >= self.fe_total_curb_slots_idle:
             glidein_min_idle /= 2  # above global treshold, reduce further
-            limits_triggered[
-                "CurbIdleGlideinsPerFrontend"
-            ] = f"count={fe_total_idle_glideins}, curb={self.fe_total_curb_slots_idle}"
+            limits_triggered["CurbIdleGlideinsPerFrontend"] = (
+                f"count={fe_total_idle_glideins}, curb={self.fe_total_curb_slots_idle}"
+            )
         if global_total_glideins >= self.global_total_curb_slots:
             glidein_min_idle /= 2  # above global treshold, reduce further
-            limits_triggered[
-                "CurbTotalGlideinsGlobal"
-            ] = f"count={global_total_glideins}, curb={self.global_total_curb_slots}"
+            limits_triggered["CurbTotalGlideinsGlobal"] = (
+                f"count={global_total_glideins}, curb={self.global_total_curb_slots}"
+            )
         if global_total_idle_glideins >= self.global_total_curb_slots_idle:
             glidein_min_idle /= 2  # above global treshold, reduce further
-            limits_triggered[
-                "CurbIdleGlideinsGlobal"
-            ] = f"count={global_total_idle_glideins}, curb={self.global_total_curb_slots_idle}"
+            limits_triggered["CurbIdleGlideinsGlobal"] = (
+                f"count={global_total_idle_glideins}, curb={self.global_total_curb_slots_idle}"
+            )
 
         if glidein_min_idle < 1:
             glidein_min_idle = 1
@@ -1809,7 +1809,7 @@ def count_match_fom_bff(self, job_types, job_type, entries):
 
         dbg_info.append(f"{len(job_groups)} job groups")
 
-        for (req_cpus, job_group) in job_groups:
+        for req_cpus, job_group in job_groups:
             # Group jobs by matching criteria: RequestCpus for now
             # We only care about job counts for each group
             job_count = len(job_group)
@@ -1913,7 +1913,7 @@ def count_match_fom_dff(self, job_types, job_type, entries):
         # Get group of jobs based on request cpus
         job_groups = jobs.groupby("RequestCpus")
 
-        for (req_cpus, job_group) in job_groups:
+        for req_cpus, job_group in job_groups:
             # Group jobs by matching criteria: RequestCpus for now
             # We care about job counts for each group
             job_count = len(job_group)
@@ -1950,7 +1950,7 @@ def count_match_fom_dff(self, job_types, job_type, entries):
         # Start with entries with lowest FOM and fill them first
         job_count_matched = 0
 
-        for (_fom, fom_group_entries) in fom_matches:
+        for _fom, fom_group_entries in fom_matches:
             job_count_unmatched = job_count - job_count_matched
             if job_count_unmatched > 0:
                 # Distribute the jobs equally among this entry group
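
The rewrapped assignments above are semantically identical to the originals; the parentheses only enable the implicit line continuation. A quick self-contained check of the equivalence, using stand-in values rather than the module's attributes:

    # Stand-in values; not taken from glide_frontend_element.py.
    limits_triggered = {}
    count, limit = 5, 10
    limits_triggered[
        "TotalGlideinsPerEntry"
    ] = f"count={count}, limit={limit}"
    old_style = limits_triggered["TotalGlideinsPerEntry"]
    limits_triggered["TotalGlideinsPerEntry"] = (
        f"count={count}, limit={limit}"
    )
    assert limits_triggered["TotalGlideinsPerEntry"] == old_style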
