diff --git a/hitapp_server/api/main.py b/hitapp_server/api/main.py
index bd625a6..c1c2a62 100644
--- a/hitapp_server/api/main.py
+++ b/hitapp_server/api/main.py
@@ -78,16 +78,19 @@


 @app.get("/")
-async def root():
-    return {"message": "Welcome to the HIT App server."}
+async def root():
+    """Health check endpoint."""
+    return {"message": "Welcome to the HIT App server."}


 @app.get("/version")
-def test():
-    return {'version': '0.1'}
+def test():
+    """Return the service version."""
+    return {'version': '0.1'}


-def set_base_url(url):
+def set_base_url(url):
+    """Extract and store the base URL used for redirections."""
     global BASE_URL
     if BASE_URL is not None:
         return
@@ -107,14 +110,15 @@ def set_base_url(url):


 @app.post("/projects")
-async def create_project(response: Response,
-                         request: Request,
-                         background_tasks: BackgroundTasks,
-                         html_template: UploadFile = File(...),
-                         csv_variables: UploadFile = File(...),
-                         project_name: str = Form(...),
-                         num_assignment: int = Form(...),
-                         platform: str = Form(...)):
+async def create_project(response: Response,
+                         request: Request,
+                         background_tasks: BackgroundTasks,
+                         html_template: UploadFile = File(...),
+                         csv_variables: UploadFile = File(...),
+                         project_name: str = Form(...),
+                         num_assignment: int = Form(...),
+                         platform: str = Form(...)):
+    """Create a new project and start HIT creation in the background."""
     set_base_url(request.headers['referer'])
     with conn.cursor() as cursor:
         response.status_code = status.HTTP_202_ACCEPTED
@@ -147,21 +151,19 @@ async def create_project(response: Response,
     return res


-def generate_hit_id(row):
+def generate_hit_id(row):
+    """Generate a unique identifier for a HIT."""
     flatten_row = ''.join(row.values())+str(random.randint(1000, 9999))
     return hashlib.md5(flatten_row.encode()).hexdigest()


-def get_customized_html(hit_app_template, row):
-    return hit_app_template.render(row)
+def get_customized_html(hit_app_template, row):
+    """Render the HIT HTML template with row values."""
+    return hit_app_template.render(row)


-async def create_hits(project_id, project_name, html_template, input_csv):
-    """
-    Runs in the background and create the HITs from the template
-    :param project_id:
-    :return:
-    """
+async def create_hits(project_id, project_name, html_template, input_csv):
+    """Background task that creates the HITs from the template."""
     print('start background task')
     with conn.cursor() as cursor:
         config = {}
@@ -220,7 +222,8 @@ async def create_hits(project_id, project_name, html_template, input_csv):


 @app.get("/projects")
-def list_project(request: Request, skip: int = 0, limit: int = 20):
+def list_project(request: Request, skip: int = 0, limit: int = 20):
+    """Return a list of projects."""
     set_base_url(request.headers['referer'])
     projects = []
     with conn.cursor() as cursor:
@@ -231,7 +234,8 @@ def list_project(request: Request, skip: int = 0, limit: int = 20):


 @app.get("/projects/{id}")
-def get_project(request: Request, id: int):
+def get_project(request: Request, id: int):
+    """Return details of a single project."""
     set_base_url(request.headers['referer'])
     with conn.cursor() as cursor:
         print(id)
@@ -240,17 +244,14 @@ def get_project(request: Request, id: int):
     return{'project': project}

 @app.get("/projects/{id}/answers/download")
-def get_project_results(request: Request, id:int, background_tasks: BackgroundTasks):
+def get_project_results(request: Request, id:int, background_tasks: BackgroundTasks):
+    """Start generation of the result CSV for a project."""
     set_base_url(request.headers['referer'])
     background_tasks.add_task(create_ans_csv, id)


-async def create_ans_csv(project_id):
-    """
-    Runs in the background and create the answer.csv to download
-    :param project_id:
-    :return:
-    """
+async def create_ans_csv(project_id):
+    """Background task that creates the answer CSV to download."""
     hits = {}
     # get the HITs
     with conn.cursor() as cursor:
@@ -272,24 +273,24 @@ async def create_ans_csv(project_id):
     conn.commit()


-async def write_dict_as_csv(dic_to_write, file_name):
-    """
-    async with aiofiles.open(file_name, 'w', newline='', encoding="utf8") as output_file:
-        if len(dic_to_write)>0:
-            headers = list(dic_to_write[0].keys())
-            writer = csv.DictWriter(output_file, fieldnames=headers)
-            await writer.writeheader()
-            for d in dic_to_write:
-                await writer.writerow(d)
-    """
+async def write_dict_as_csv(dic_to_write, file_name):
+    """Write a list of dictionaries to a CSV file."""
+    async with aiofiles.open(file_name, 'w', newline='', encoding="utf8") as output_file:
+        if len(dic_to_write) > 0:
+            headers = list(dic_to_write[0].keys())
+            writer = csv.DictWriter(output_file, fieldnames=headers)
+            await writer.writeheader()
+            for d in dic_to_write:
+                await writer.writerow(d)
     df = pd.DataFrame(dic_to_write)
     df = df.fillna("")
     df.to_csv(file_name, index=False)


 @app.get("/projects/{id}/answers/count")
-def get_amt_data(request: Request, response: Response, id: int):
-    set_base_url(request.headers['referer'])
+def get_amt_data(request: Request, response: Response, id: int):
+    """Return the number of answers stored for a project."""
+    set_base_url(request.headers['referer'])
     with conn.cursor() as cursor:
         cursor.execute("""SELECT count(id) as count FROM "Answers" where "ProjectId"=%s """, (id,))
         result = cursor.fetchone()
@@ -298,8 +299,9 @@


 @app.post("/answers/{project_id}")
-async def add_answer(response: Response, info : Request, x_real_ip: str = Header(None, alias='X-Real-IP')):
-    req_info = await info.json()
+async def add_answer(response: Response, info : Request, x_real_ip: str = Header(None, alias='X-Real-IP')):
+    """Store an answer coming from the HIT application."""
+    req_info = await info.json()
     key_data, answers = json_formater(req_info, 'Answer.')
     with conn.cursor() as cursor:
         v_code = generate_vcode()
@@ -316,7 +318,8 @@ async def add_answer(response: Response, info : Request, x_real_ip: str = Header
     return {'vcode': v_code}


-def json_formater(ajax_post, prefix=""):
+def json_formater(ajax_post, prefix=""):
+    """Split AJAX post data into key data and answer dictionary."""
     key_prop = ["hittypeid", "hitid", "assignmentid", "workerid", "url", "campaignid", "projectid"]
     key_remove = ["start_working_time","submission_time"]
     key_without_prefix = ["work_duration_sec"]
@@ -334,16 +337,15 @@ def json_formater(ajax_post, prefix=""):
     return key_data, data


-def generate_vcode():
-    rand = str(random.randint(1000, 9999)) + str(random.randint(1000, 9999)) + str(random.randint(1000, 9999))
-    return hashlib.md5(rand.encode()).hexdigest()
+def generate_vcode():
+    """Generate a random verification code."""
+    rand = str(random.randint(1000, 9999)) + str(random.randint(1000, 9999)) + str(random.randint(1000, 9999))
+    return hashlib.md5(rand.encode()).hexdigest()


 @app.delete("/projects/{id}")
-def del_project(id: int, background_tasks: BackgroundTasks):
-    """
-    Deletes a project
-    """
+def del_project(id: int, background_tasks: BackgroundTasks):
+    """Delete a project and its associated files."""
     with conn.cursor() as cursor:
         # delete answers
         #cursor.execute(""" DELETE FROM public."Answers" WHERE ProjectId= %s""", (id,))
@@ -372,8 +374,9 @@ def del_project(id: int, background_tasks: BackgroundTasks):
         # delete project
         cursor.execute(""" DELETE FROM "Projects" WHERE id= %s""", (id,))

-def delete_file(filename):
-    if filename[:4] == "http":
+def delete_file(filename):
+    """Remove a file from disk."""
+    if filename[:4] == "http":
         filename = "/".join(filename.split("/")[3:])
     print(f"delete_file: {filename}")
     try:
@@ -381,10 +384,10 @@ def del_project(id: int, background_tasks: BackgroundTasks):
     except Exception as e:
         print(e)

-"""
-@app.post("/rec")
-async def store_recordings(assignment_id: str = Form(...) , file: UploadFile = File(...)):
-    v_code = generate_vcode()
+@app.post("/rec")
+async def store_recordings(assignment_id: str = Form(...) , file: UploadFile = File(...)):
+    """Store uploaded audio recordings."""
+    v_code = generate_vcode()
     print(f'store_recordings: {assignment_id}, {v_code}')
     out_file_path_html=Path(BASE_DIR, f"static/rec/{assignment_id}.wav")
     # store html file
@@ -396,17 +399,17 @@ async def store_recordings(assignment_id: str = Form(...) , file: UploadFile = F


 @app.post("/recjson")
-async def store_recordings2(response: Response, info : Request):
-    req_info = await info.json()
+async def store_recordings2(response: Response, info : Request):
+    """Store recordings provided as JSON payload."""
+    req_info = await info.json()
     print(req_info)


 @app.get("/rec_exist/{assignment_id}")
-def check_recording_exist(response: Response, assignment_id:str):
-    out_file_path_html = Path(BASE_DIR, f"static/rec/{assignment_id}.wav")
+def check_recording_exist(response: Response, assignment_id:str):
+    """Check if a recording exists for the given assignment."""
+    out_file_path_html = Path(BASE_DIR, f"static/rec/{assignment_id}.wav")
     if os.path.isfile(out_file_path_html):
         return {'exist': 1}
-    else:
-        return {'exist': 0}
-
-"""
\ No newline at end of file
+    else:
+        return {'exist': 0}
diff --git a/hitapp_server/configure/configure.py b/hitapp_server/configure/configure.py
index 97723ef..ebd3f76 100644
--- a/hitapp_server/configure/configure.py
+++ b/hitapp_server/configure/configure.py
@@ -21,6 +21,7 @@ def parse_args():
     return args

 def main():
+    """Run the configuration helper."""
     args = parse_args()
     with open(args.config, 'r') as f:
         config = yaml.safe_load(f)
diff --git a/src/azure_clip_storage.py b/src/azure_clip_storage.py
index 41d02a1..ec7be11 100644
--- a/src/azure_clip_storage.py
+++ b/src/azure_clip_storage.py
@@ -8,7 +8,18 @@


 class AzureClipStorage:
+    """Helper class to access clips stored in Azure blob containers."""
+
     def __init__(self, config, alg):
+        """Initialize the storage helper.
+
+        Parameters
+        ----------
+        config : dict
+            Dictionary containing the Azure storage configuration.
+        alg : str
+            Name of the algorithm this storage belongs to.
+ """ self._account_name = os.path.basename( config['StorageUrl']).split('.')[0] @@ -51,8 +62,9 @@ def modified_clip_names(self): return self._modified_clip_names async def retrieve_contents(self, list_generator, dirname=''): + """Populate ``_clip_names`` from an Azure blob listing.""" for e in list_generator: - if not '.wav' in e.name: + if '.wav' not in e.name: continue if dirname: @@ -62,6 +74,7 @@ async def retrieve_contents(self, list_generator, dirname=''): self._clip_names.append(clip_path) async def get_clips(self): + """Retrieve clip names from the container.""" blobs = self.store_service.list_blobs( name_starts_with=self.clips_path) @@ -77,15 +90,19 @@ async def get_clips(self): await self.retrieve_contents(blobs) def make_clip_url(self, filename): + """Create a full URL for a clip inside the container.""" return f"https://{self._account_name}.blob.core.windows.net/{self._container}/{filename}?{self._SAS_token}" # todo what about dcr class GoldSamplesInStore(AzureClipStorage): + """Storage helper for gold standard clips.""" + def __init__(self, config, alg): super().__init__(config, alg) self._SAS_token = '' async def get_dataframe(self): + """Return a DataFrame describing all gold clips.""" clips = await self.clip_names df = pd.DataFrame(columns=['gold_clips_pvs', 'gold_clips_ans']) clipsList = [] @@ -102,7 +119,10 @@ async def get_dataframe(self): # todo what about dcr class TrappingSamplesInStore(AzureClipStorage): + """Storage helper for trapping question clips.""" + async def get_dataframe(self): + """Return a DataFrame describing all trapping clips.""" clips = await self.clip_names df = pd.DataFrame(columns=['trapping_pvs', 'trapping_ans']) clipsList = [] @@ -129,9 +149,11 @@ async def get_dataframe(self): df = df.append(clipsList) return df -""" class PairComparisonSamplesInStore(AzureClipStorage): + """Storage helper for pair-comparison clips.""" + async def get_dataframe(self): + """Return a DataFrame describing all pair-comparison clips.""" clips = await self.clip_names pair_a_clips = [self.make_clip_url(clip) for clip in clips if '40S_' in clip] @@ -139,4 +161,3 @@ async def get_dataframe(self): df = pd.DataFrame({'pair_a': pair_a_clips, 'pair_b': pair_b_clips}) return df -""" \ No newline at end of file diff --git a/src/create_input.py b/src/create_input.py index 7f156df..f418e41 100644 --- a/src/create_input.py +++ b/src/create_input.py @@ -160,11 +160,32 @@ def add_clips_balanced_block(clips, condition_pattern, keys, n_clips_per_session return output_df -def add_clips_balanced_block_ccr(clips, refs, condition_pattern, keys, n_clips_per_session, output_df): - # create the structure only using clips - add_clips_balanced_block(clips, condition_pattern, keys, n_clips_per_session, output_df) - clips = clips.tolist() - refs = refs.tolist() +def add_clips_balanced_block_ccr(clips, refs, condition_pattern, keys, n_clips_per_session, output_df): + """Create the balanced block structure for CCR/DCR tests. + + Parameters + ---------- + clips : pd.Series + List of clips to be used as processed videos. + refs : pd.Series + List of reference clips matching ``clips``. + condition_pattern : str + Regex pattern describing the conditions embedded in the filenames. + keys : str + Keys describing the order of the conditions. + n_clips_per_session : int + Number of clips that should appear in a session. + output_df : pd.DataFrame + DataFrame that will be filled with the packed clips. 
+
+    Returns
+    -------
+    None
+    """
+    # create the structure only using clips
+    add_clips_balanced_block(clips, condition_pattern, keys, n_clips_per_session, output_df)
+    clips = clips.tolist()
+    refs = refs.tolist()
     for q in range(n_clips_per_session):
         clip_list = output_df[f'Q{q}'].tolist()
         ref_list = []
@@ -254,21 +275,56 @@ def add_clips_random_acrhr(clips, refs, n_clips_per_session, output_df):
         output_df[f'Q{q}_R'] = refs_sessions[:, q]


-def get_random_plate(df, plate,n_sessions):
-    df_cv = df[['cv_plate', 'cv_url']].copy()
-    # filter for cv_plate = plate_3
-    df_cv_plate = df_cv[df_cv['cv_plate'] == plate].copy()
-    # randomly select n_sessions , it could be n_sessions > number of rows in df_cv_plate3
-    if n_sessions > df_cv_plate.shape[0]:
-        df_cv_plate = df_cv_plate.sample(n=n_sessions, replace=True)
-    else:
-        df_cv_plate = df_cv_plate.sample(n=n_sessions, replace=False)
-
-    return df_cv_plate['cv_url'].tolist()
-
-def get_random_block_matrix(df, type, n_sessions):
-
-    cols = ['circles','triangles','block_matrix_url']
+def get_random_plate(df, plate, n_sessions):
+    """Randomly choose color vision plates for the given session count.
+
+    Parameters
+    ----------
+    df : pandas.DataFrame
+        DataFrame containing ``cv_plate`` and ``cv_url`` columns.
+    plate : str
+        Plate identifier to filter on (e.g. ``'plate_3'``).
+    n_sessions : int
+        Number of sessions requiring a plate.
+
+    Returns
+    -------
+    list[str]
+        List of URLs to be used for the selected sessions.
+    """
+    df_cv = df[['cv_plate', 'cv_url']].copy()
+    # Filter for the requested plate number
+    df_cv_plate = df_cv[df_cv['cv_plate'] == plate].copy()
+    # Randomly select ``n_sessions`` rows. Sampling with replacement is used
+    # when the number of available rows is smaller than ``n_sessions``.
+    if n_sessions > df_cv_plate.shape[0]:
+        df_cv_plate = df_cv_plate.sample(n=n_sessions, replace=True)
+    else:
+        df_cv_plate = df_cv_plate.sample(n=n_sessions, replace=False)
+
+    return df_cv_plate['cv_url'].tolist()
+
+def get_random_block_matrix(df, type, n_sessions):
+    """Select random block-matrix entries for each session.
+
+    Parameters
+    ----------
+    df : pandas.DataFrame
+        DataFrame containing columns ``circles``, ``triangles`` and
+        ``block_matrix_url``. Optionally a ``type`` column may be present.
+    type : str
+        When the DataFrame contains a ``type`` column, only rows matching this
+        value are considered.
+    n_sessions : int
+        Number of sessions that need a block matrix.
+
+    Returns
+    -------
+    pandas.DataFrame
+        DataFrame with ``n_sessions`` randomly selected rows.
+ """ + + cols = ['circles', 'triangles', 'block_matrix_url'] if 'type' in df.columns: cols.append('type') # only keep cols diff --git a/src/create_split_screen_dcr.py b/src/create_split_screen_dcr.py index 022e1fa..73d7cc9 100644 --- a/src/create_split_screen_dcr.py +++ b/src/create_split_screen_dcr.py @@ -19,8 +19,9 @@ tmp_files = [] -def get_video_resolution(video): - # find out the resolution of sample video +def get_video_resolution(video): + """Return the (width, height) of a video file.""" + # find out the resolution of sample video vid = cv2.VideoCapture(video) height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)) width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)) @@ -28,8 +29,9 @@ def get_video_resolution(video): return (width, height) -def create_image(width, height, clip_id, des_folder): - # find a proper font size +def create_image(width, height, clip_id, des_folder): + """Create a simple image containing the clip id as text.""" + # find a proper font size expected_text_width = width * 0.8 percentage = 0 font_size = 15 @@ -59,8 +61,9 @@ def create_image(width, height, clip_id, des_folder): return image_path -def create_video(width, height, clip_id, des_folder): - img_path = create_image(width, height, clip_id, des_folder) +def create_video(width, height, clip_id, des_folder): + """Create a short video showing the clip id.""" + img_path = create_image(width, height, clip_id, des_folder) frame = cv2.imread(img_path) height, width, layers = frame.shape video_path =join(des_folder, f'tmp_{width}_{height}_{clip_id}.png') @@ -72,8 +75,9 @@ def create_video(width, height, clip_id, des_folder): return video_path -def create_split_screen(width, height, des_a, des_b): - # Clip A +def create_split_screen(width, height, des_a, des_b): + """Placeholder for creating a split screen clip.""" + # Clip A return diff --git a/src/master_script.py b/src/master_script.py index 0082509..29cd07e 100644 --- a/src/master_script.py +++ b/src/master_script.py @@ -556,9 +556,25 @@ def get_path(test_method): # checked -async def main(cfg, test_method, args): - - # check assets +async def main(cfg, test_method, args): + """Entry point for the master script. + + This routine validates inputs, prepares the CSV files used by MTurk and + generates the HTML HIT application as well as the result parser + configuration. + + Parameters + ---------- + cfg : configparser.ConfigParser + Loaded configuration file. + test_method : str + One of the supported test methods ('acr', 'dcr', 'acr-hr', 'ccr', + 'avatar'). + args : argparse.Namespace + Parsed command line arguments. + """ + + # check assets general_path = os.path.join(os.path.dirname(__file__), 'assets_master_script/general.csv') internal_general_path = os.path.join(os.path.dirname(__file__), 'assets_master_script/internal_general.csv') if os.path.exists(internal_general_path): diff --git a/src/result_parser.py b/src/result_parser.py index a05b5a9..3f124ae 100644 --- a/src/result_parser.py +++ b/src/result_parser.py @@ -366,6 +366,18 @@ def check_variance(row, method): return -1 def check_gold_question_avatarb(row): + """Check correctness of gold question answers for Avatar-B template. + + Parameters + ---------- + row : pandas.Series + Row from the results CSV containing worker answers. + + Returns + ------- + tuple + (1, details) if answered correctly, otherwise (0, details). 
+ """ correct_gq = 0 details = {} try: @@ -952,8 +964,9 @@ def save_block_list(block_list, path, wrong_v_code_freq): def check_wrong_vcode_should_block(wrong_vcodes): + """Return worker IDs that repeatedly submitted wrong verification codes.""" if wrong_vcodes is None: - return [] + return [] # count the number of wrong verification code per worker small_df = wrong_vcodes[['WorkerId']].copy() grouped = small_df.groupby(['WorkerId']).size().reset_index(name='counts') @@ -1549,8 +1562,9 @@ def number_of_unique_workers(answers, used): def recover_submission_withoiut_matching_vcode(hit_ans, amt_ans, not_in_hitapp): - # iterate over daraftame - for index, row in not_in_hitapp.iterrows(): + """Try to match AMT answers with submissions missing in the HIT app.""" + # iterate over dataframe + for index, row in not_in_hitapp.iterrows(): # find the matching row in amt amt_row = amt_ans[amt_ans['AssignmentId'] == row['Answer.hitapp_assignmentId']] if len(amt_row) == 1: diff --git a/src/template/components/brightness/create_imgs_brightness.py b/src/template/components/brightness/create_imgs_brightness.py index 5a9a330..1243dfe 100644 --- a/src/template/components/brightness/create_imgs_brightness.py +++ b/src/template/components/brightness/create_imgs_brightness.py @@ -63,8 +63,9 @@ def create_image(path, level, text, background): print(e) -def create_image_shapes(): - background = (15, 15, 15) +def create_image_shapes(): + """Create example image containing basic shapes.""" + background = (15, 15, 15) W, H = (500, 500) # create an empty image 500x500 px img = Image.new("RGB", (W, H), background) @@ -75,7 +76,8 @@ def create_image_shapes(): background.save('out.png') -def create_image_range(bg): +def create_image_range(bg): + """Generate a set of images over the defined brightness range.""" background = (bg, bg, bg) levels = range(min_level, max_level+1) path_dir = f"pics_{bg}" @@ -86,7 +88,8 @@ def create_image_range(bg): create_image(path_dir, l, f"{n}", background) -def get_color(background, level): +def get_color(background, level): + """Return a color brighter than ``background`` by ``level``.""" c = (background[0]+level, background[1]+level, background[2]+level) return c @@ -96,7 +99,8 @@ def get_color(background, level): TRIANGLE = 2 -def create_block(background, level, shape): +def create_block(background, level, shape): + """Create a single block with the chosen shape.""" canvas = Image.new('RGB', (100, 100), background) img_draw = ImageDraw.Draw(canvas) front_color = get_color(background, level) @@ -110,7 +114,8 @@ def create_block(background, level, shape): canvas.save('tmp.png') -def create_matrix_image(level): +def create_matrix_image(level): + """Create a matrix image of random shapes for the given level.""" backgrounds = [15, 72, 128, 185, 240] #levels = [2, 3, 4] # levels = [4] @@ -174,7 +179,8 @@ def method2_shapes(): df.to_csv(f'matrix_files-details_{difference_to_bg}.csv') -def print_matrix_name(): +def print_matrix_name(): + """Utility to print encoded matrix names used for debugging.""" data 
     data =['Yzo0X3Q6MTA=','Yzo0X3Q6Mw==','Yzo0X3Q6Ng==','Yzo0X3Q6Nw==','Yzo0X3Q6OA==','Yzo1X3Q6Mg==','Yzo1X3Q6Mw==','Yzo1X3Q6NA==','Yzo1X3Q6Ng==','Yzo1X3Q6NQ==','Yzo1X3Q6Nw==','Yzo1X3Q6OA==','Yzo1X3Q6OQ==','Yzo2X3Q6Mg==','Yzo2X3Q6Mw==','Yzo2X3Q6NA==','Yzo2X3Q6Ng==','Yzo2X3Q6NQ==','Yzo2X3Q6Nw==','Yzo2X3Q6OA==','Yzo3X3Q6Mg==','Yzo3X3Q6Mw==','Yzo3X3Q6NA==','Yzo3X3Q6Ng==','Yzo3X3Q6NQ==','Yzo3X3Q6Nw==','Yzo3X3Q6OA==','Yzo4X3Q6Mg==','Yzo4X3Q6Mw==','Yzo4X3Q6NA==','Yzo4X3Q6Ng==','Yzo4X3Q6NQ==','Yzo4X3Q6Nw==','Yzo5X3Q6Mg==','Yzo5X3Q6MQ==','Yzo5X3Q6Mw==','Yzo5X3Q6NQ==','YzoxMF90OjM=','YzoxMV90OjM=','YzoxX3Q6MTI=','YzoxX3Q6Ng==','YzoxX3Q6NQ==','YzoxX3Q6Nw==','YzoyX3Q6MTA=','YzoyX3Q6Ng==','YzozX3Q6MTA=','YzozX3Q6NQ==','YzozX3Q6Nw==','YzozX3Q6OA==','YzozX3Q6OQ==']
     df = pd.DataFrame(columns=['name', 'c', 't'])
     for c in range (0,17):
diff --git a/src/template/components/viewing_distance/script/blur_images.py b/src/template/components/viewing_distance/script/blur_images.py
index e312d80..4310dbc 100644
--- a/src/template/components/viewing_distance/script/blur_images.py
+++ b/src/template/components/viewing_distance/script/blur_images.py
@@ -11,10 +11,11 @@
 from os import path


-def blur_img(source, output, r):
-    source_img = Image.open(source)
-    out_img = source_img.filter(ImageFilter.BoxBlur(r))
-    out_img.save(output)
+def blur_img(source, output, r):
+    """Blur ``source`` image with radius ``r`` and save it to ``output``."""
+    source_img = Image.open(source)
+    out_img = source_img.filter(ImageFilter.BoxBlur(r))
+    out_img.save(output)


 if __name__ == '__main__':
diff --git a/src/trapping_clips/create_trapping_clips.py b/src/trapping_clips/create_trapping_clips.py
index b6cf67a..3e15cf1 100644
--- a/src/trapping_clips/create_trapping_clips.py
+++ b/src/trapping_clips/create_trapping_clips.py
@@ -77,9 +77,9 @@ def create_msg_img(cfg, score, des, v_width, v_height):
     percentage = 0
     font_size = 15

-    score_text = str(score)
-    if args.avatar and 'avatar_rating_answers' in cfg:
-        score_text = {json.loads(cfg['avatar_rating_answers'])[str(score)]
+    score_text = str(score)
+    if args.avatar and 'avatar_rating_answers' in cfg:
+        score_text = json.loads(cfg['avatar_rating_answers'])[str(score)]

     if len(cfg['message_line1'].format(score_text)) > len(cfg['message_line2'].format(score_text)):
         text = cfg['message_line1'].format(score_text)