diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml new file mode 100644 index 00000000..1dce819e --- /dev/null +++ b/.idea/inspectionProfiles/Project_Default.xml @@ -0,0 +1,176 @@ + + + + \ No newline at end of file diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml new file mode 100644 index 00000000..105ce2da --- /dev/null +++ b/.idea/inspectionProfiles/profiles_settings.xml @@ -0,0 +1,6 @@ + + + + \ No newline at end of file diff --git a/.idea/misc.xml b/.idea/misc.xml new file mode 100644 index 00000000..70057c2d --- /dev/null +++ b/.idea/misc.xml @@ -0,0 +1,7 @@ + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 00000000..ad48f66c --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/vcs.xml b/.idea/vcs.xml new file mode 100644 index 00000000..53109897 --- /dev/null +++ b/.idea/vcs.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/workspace.xml b/.idea/workspace.xml new file mode 100644 index 00000000..575f03da --- /dev/null +++ b/.idea/workspace.xml @@ -0,0 +1,409 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 1614415693008 + + + + + + + + + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 297 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 310 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 304 + + + file://$PROJECT_DIR$/Scripts/xfactor_main.py + 50 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 91 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 202 + + + 
file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 158 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 291 + + + file://$PROJECT_DIR$/Scripts/Services/AwsConnection/aws_s3_bucket_conn.py + 79 + + + file://$PROJECT_DIR$/Scripts/Services/AwsConnection/aws_s3_bucket_conn.py + 70 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 118 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 254 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 103 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 65 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 94 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 199 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 209 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 217 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 130 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 255 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 257 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 265 + + + file://$PROJECT_DIR$/Scripts/Services/RecommendationEngines/similar_looking_recommendations.py + 50 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 388 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 279 + + + file://$PROJECT_DIR$/Scripts/Services/RecommendationEngines/similar_looking_recommendations.py + 86 + + + file://$PROJECT_DIR$/Scripts/Services/RecommendationEngines/similar_looking_recommendations.py + 51 + + + 
file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 576 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 584 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 428 + + + file://$PROJECT_DIR$/Scripts/Services/RecommendationEngines/similar_looking_recommendations.py + 44 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 327 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 448 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 451 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 511 + + + file://$PROJECT_DIR$/Scripts/Services/RecommendationEngines/discovery_page_recommendations.py + 98 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 62 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 86 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 82 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 17 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 39 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 140 + + + file://$PROJECT_DIR$/Scripts/Services/MongoConnection/mongo_connection.py + 142 + + + file://$PROJECT_DIR$/Scripts/Services/AwsConnection/aws_s3_bucket_conn.py + 29 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/inception_blocks_v2.py + 20 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/fr_utils.py + 52 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/main_image_matching_service.py + 171 + + + file://$PROJECT_DIR$/Scripts/Services/ImageMatchingServices/fr_utils.py + 183 + + + file://$PROJECT_DIR$/Scripts/Services/FaceRecognitionLibrary/face_recognition_distance.py + 28 + + + 
file://$PROJECT_DIR$/Scripts/xfactor_main.py + 90 + + + file://$PROJECT_DIR$/Scripts/xfactor_main.py + 136 + + + file://$PROJECT_DIR$/Scripts/xfactor_main.py + 143 + + + + + \ No newline at end of file diff --git a/.idea/xfactor_image_match_microservices_v3.iml b/.idea/xfactor_image_match_microservices_v3.iml new file mode 100644 index 00000000..e0608460 --- /dev/null +++ b/.idea/xfactor_image_match_microservices_v3.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/Configuration/__init__.py b/Configuration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Configuration/__pycache__/__init__.cpython-36.pyc b/Configuration/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 00000000..280e55d8 Binary files /dev/null and b/Configuration/__pycache__/__init__.cpython-36.pyc differ diff --git a/Configuration/__pycache__/__init__.cpython-37.pyc b/Configuration/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..d6dfa705 Binary files /dev/null and b/Configuration/__pycache__/__init__.cpython-37.pyc differ diff --git a/Configuration/__pycache__/configreader.cpython-36.pyc b/Configuration/__pycache__/configreader.cpython-36.pyc new file mode 100644 index 00000000..20300dad Binary files /dev/null and b/Configuration/__pycache__/configreader.cpython-36.pyc differ diff --git a/Configuration/__pycache__/configreader.cpython-37.pyc b/Configuration/__pycache__/configreader.cpython-37.pyc new file mode 100644 index 00000000..769499d5 Binary files /dev/null and b/Configuration/__pycache__/configreader.cpython-37.pyc differ diff --git a/Configuration/configreader.py b/Configuration/configreader.py new file mode 100644 index 00000000..271d92a1 --- /dev/null +++ b/Configuration/configreader.py @@ -0,0 +1,13 @@ +""" +Purpose: Read the configurations from YAML file +""" + +import yaml +from Scripts.Utility import utils + +def read_configuration(file_name): + with open(file_name, "r") as fl: + try: + return 
yaml.load(stream=fl, Loader=yaml.FullLoader) + except yaml.YAMLError as exc: + utils.logger.error("Configuration File Read Error" + str(file_name) + "ERROR" + str(exc)) \ No newline at end of file diff --git a/Configuration/requirements.txt b/Configuration/requirements.txt new file mode 100644 index 00000000..ffed06cb --- /dev/null +++ b/Configuration/requirements.txt @@ -0,0 +1,31 @@ +Flask == 1.1.2 +Flask-Cors == 3.0.8 +Flask-Compress == 1.5.0 +gunicorn==20.0.4 +boto3 == 1.13.1 +opencv-python == 4.2.0.34 +keras == 2.2.5 +tensorflow == 1.14 +# tensorflow-gpu==1.15 +numpy == 1.16.4 +Pillow == 7.1.2 +SQLAlchemy == 1.3.17 +PyMySQL == 0.9.3 +collections-extended == 1.0.3 +matplotlib == 3.2.1 +tqdm == 4.46.0 +h5py==2.9.0 +pandas +pymongo[srv] +#cmake +#dlib==19.22.0 +#face-recognition==1.3.0 + + + + + +# Older versions of TensorFlow +# For releases 1.15 and older, CPU and GPU packages are separate: +# pip install tensorflow==1.15 # CPU +# pip install tensorflow-gpu==1.15 # GPU diff --git a/Configuration/service.yml b/Configuration/service.yml new file mode 100644 index 00000000..eb744c00 --- /dev/null +++ b/Configuration/service.yml @@ -0,0 +1,13 @@ +service_name: xfactor_image_match_microservice_v1 +log_level: DEBUG + +settings: + ip: 0.0.0.0 + port: 4007 + +path: + log_path: "./log" + +mongodb_credentials: + database: 'exfactor' + mongodb_url: "mongodb+srv://afghaniiit:iiitbangalore@cluster0.13dtx.mongodb.net/exfactor?retryWrites=true&w=majority" diff --git a/Constants/__pycache__/const.cpython-36.pyc b/Constants/__pycache__/const.cpython-36.pyc new file mode 100644 index 00000000..3766f706 Binary files /dev/null and b/Constants/__pycache__/const.cpython-36.pyc differ diff --git a/Constants/__pycache__/const.cpython-37.pyc b/Constants/__pycache__/const.cpython-37.pyc new file mode 100644 index 00000000..b0c0efbc Binary files /dev/null and b/Constants/__pycache__/const.cpython-37.pyc differ diff --git a/Constants/const.py b/Constants/const.py new file mode 100644 
index 00000000..824c1880 --- /dev/null +++ b/Constants/const.py @@ -0,0 +1,295 @@ +import __root__ +bucket_name = 'xfactor-app' + +image_file_extention = [".jpg", ".jpeg", ".tif", ".tiff", ".gif", ".png", ".psd", ".bmp", ".jfif"] +preference_images_dir = './Data/preference_images_dir' +profile_images_dir = './Data/profile_images_dir' + +compare_from_profile_dp_image = "./Data/compare_profile_images/compare_from_profile_dp_image" +compare_to_test_image = "./Data/compare_profile_images/compare_to_test_image" + +suspects_images_repo = "./Data/experiment_images_dir/images_main_repo" +# suspect_test_image = "./Data/experiment_images_dir/test_images_repo" +suspect_test_image = "static/img" + +##---------------------- afghaniiit@gmail.com's MongoDB------------------------------------ + +# table1_profileVectorizedImages = "flask_test_table1_v1" +# table2_preferencesBasedMatchedProfiles = "flask_test_table2_v1" + +##---------------------- Nitin's ex-factor-test MongoDB------------------------------------ +table1_profileVectorizedImages = "ai_table1_profileVectorizedImages" +table2_preferencesBasedMatchedProfiles = "ai_table2_preferencesBasedMatchedProfiles" + + +aws_s3Bucket_url = "https://xfactor-app.s3.ap-south-1.amazonaws.com" + +# aws_s3Bucket_url = "https://exfactor-prod.s3.ap-south-1.amazonaws.com/" + +img_now = "img_now.jpg" +img_normal = "img_normal.jpg" +img_removed = "img_removed.jpg" +img_predict = "img_predicted.jpg" + + +##---------------------- For interests/hobbies ------------------------------------ + +interests_dict = { + "creativity": [ + "art", + "crafts", + "dancing", + "design", + "make-up", + "making videos", + "photography", + "singing", + "writing" + ], + "film & tv": [ + "action and adventure", + "animated", + "anime", + "bollywood", + "comedy", + "cooking shows", + "crime", + "documentaries", + "drama", + "fantasy", + "game shows", + "horror", + "indie", + "mystery", + "reality shows", + "rom-com", + "romance", + "sci-fi", + "superhero", + 
"thriller" + ], + "food & drink": [ + "beer", + "biryani", + "coffee", + "foodie", + "gin", + "maggi", + "pizza", + "sweet tooth", + "vegan", + "vegatarian", + "whisly", + "wine" + ], + "going out": [ + "bars", + "festivals", + "gigs", + "museaums and galleries", + "nightclubs", + "stand up", + "theatre" + ], + "music": [ + "afro", + "arab", + "blues", + "classical", + "country", + "desi", + "edm", + "eletronic", + "folk & acoustic", + "funk", + "hip hop", + "house", + "indie", + "jazz", + "k-pop", + "latin", + "metal", + "pop", + "punjabi", + "punk", + "r&b", + "rap", + "reggae", + "rock", + "soul", + "sufi", + "techno" + ], + "pets": [ + "birds", + "cats", + "dogs", + "fish", + "lizards", + "rabbits", + "snakes" + ], + "popular": [ + "beaches", + "coffee", + "cooking", + "dogs", + "hiking trips", + "video games" + ], + "reading": [ + "action and adventure", + "biographies", + "classics", + "comedy", + "comic books", + "crime", + "fantasy", + "history", + "horror", + "manga", + "mystery", + "philosophy", + "poetry", + "psychology", + "romance", + "sci-fi", + "science", + "thriller" + ], + "sports": [ + "americal football", + "athletics", + "badminton", + "baseball", + "boxing", + "cricket", + "cycling", + "football", + "golf", + "gym", + "gymnastics", + "martial arts", + "meditation", + "running", + "skiing", + "surfing", + "swimming", + "table tennis", + "volleyball", + "yoga" + ], + "staying in": [ + "baking", + "board games", + "cooking", + "gardening", + "takeaways", + "video games" + ], + "travelling": [ + "backpacking", + "beaches", + "camping", + "city breaks", + "country escapes", + "fishing trips", + "hiking trips", + "road trips", + "spa weekends", + "winter sports" + ], + "values & traits": [ + "ambition", + "being active", + "being family-oriented", + "being open-minded", + "being romantic", + "confidence", + "creativity", + "empathy", + "intelligence", + "positivity", + "self-awareness", + "sense of adventure", + "sense of humour", + "social 
awareness" + ] + } + + +# users_df_columns = ['user_id', 'name', 'sex_orientation', 'gender', 'dob','location'] +# media_df_columns = ['media_id', 'user_id', 'type', 'media_url', 'dp'] +# +# to_match_preference_image_folder = "./Assets/Data/Images/TestCases" +# match_with_profile_images_folder = "./Assets/Data/Images/RefProfileImages" +# +# pickled_database_path = "./Assets/Data/Images/RefProfileImages/PickledFile_reference_images_database.pickle" + + +WEIGHTS = [ + 'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3', + 'inception_3a_1x1_conv', 'inception_3a_1x1_bn', + 'inception_3a_pool_conv', 'inception_3a_pool_bn', + 'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2', + 'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2', + 'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2', + 'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2', + 'inception_3b_pool_conv', 'inception_3b_pool_bn', + 'inception_3b_1x1_conv', 'inception_3b_1x1_bn', + 'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2', + 'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2', + 'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2', + 'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2', + 'inception_4a_pool_conv', 'inception_4a_pool_bn', + 'inception_4a_1x1_conv', 'inception_4a_1x1_bn', + 'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2', + 'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2', + 'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2', + 'inception_5a_pool_conv', 'inception_5a_pool_bn', 
+ 'inception_5a_1x1_conv', 'inception_5a_1x1_bn', + 'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2', + 'inception_5b_pool_conv', 'inception_5b_pool_bn', + 'inception_5b_1x1_conv', 'inception_5b_1x1_bn', + 'dense_layer' +] + +conv_shape = { + 'conv1': [64, 3, 7, 7], + 'conv2': [64, 64, 1, 1], + 'conv3': [192, 64, 3, 3], + 'inception_3a_1x1_conv': [64, 192, 1, 1], + 'inception_3a_pool_conv': [32, 192, 1, 1], + 'inception_3a_5x5_conv1': [16, 192, 1, 1], + 'inception_3a_5x5_conv2': [32, 16, 5, 5], + 'inception_3a_3x3_conv1': [96, 192, 1, 1], + 'inception_3a_3x3_conv2': [128, 96, 3, 3], + 'inception_3b_3x3_conv1': [96, 256, 1, 1], + 'inception_3b_3x3_conv2': [128, 96, 3, 3], + 'inception_3b_5x5_conv1': [32, 256, 1, 1], + 'inception_3b_5x5_conv2': [64, 32, 5, 5], + 'inception_3b_pool_conv': [64, 256, 1, 1], + 'inception_3b_1x1_conv': [64, 256, 1, 1], + 'inception_3c_3x3_conv1': [128, 320, 1, 1], + 'inception_3c_3x3_conv2': [256, 128, 3, 3], + 'inception_3c_5x5_conv1': [32, 320, 1, 1], + 'inception_3c_5x5_conv2': [64, 32, 5, 5], + 'inception_4a_3x3_conv1': [96, 640, 1, 1], + 'inception_4a_3x3_conv2': [192, 96, 3, 3], + 'inception_4a_5x5_conv1': [32, 640, 1, 1,], + 'inception_4a_5x5_conv2': [64, 32, 5, 5], + 'inception_4a_pool_conv': [128, 640, 1, 1], + 'inception_4a_1x1_conv': [256, 640, 1, 1], + 'inception_4e_3x3_conv1': [160, 640, 1, 1], + 'inception_4e_3x3_conv2': [256, 160, 3, 3], + 'inception_4e_5x5_conv1': [64, 640, 1, 1], + 'inception_4e_5x5_conv2': [128, 64, 5, 5], + 'inception_5a_3x3_conv1': [96, 1024, 1, 1], + 'inception_5a_3x3_conv2': [384, 96, 3, 3], + 'inception_5a_pool_conv': [96, 1024, 1, 1], + 'inception_5a_1x1_conv': [256, 1024, 1, 1], + 'inception_5b_3x3_conv1': [96, 736, 1, 1], + 'inception_5b_3x3_conv2': [384, 96, 3, 3], + 'inception_5b_pool_conv': [96, 736, 1, 1], + 'inception_5b_1x1_conv': [256, 736, 1, 1], +} \ No newline at end of file diff --git 
a/Data/experiment_images_dir/images_main_repo/-328#_78bef103-331b-42df-a91d-a73b2c8de5771621.jpg b/Data/experiment_images_dir/images_main_repo/-328#_78bef103-331b-42df-a91d-a73b2c8de5771621.jpg new file mode 100644 index 00000000..ce8f77bc Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/-328#_78bef103-331b-42df-a91d-a73b2c8de5771621.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/000000#_24da23fb-859b-4238-bc53-32dfac160406823.jpg b/Data/experiment_images_dir/images_main_repo/000000#_24da23fb-859b-4238-bc53-32dfac160406823.jpg new file mode 100644 index 00000000..d2d58605 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/000000#_24da23fb-859b-4238-bc53-32dfac160406823.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/001#_6f8a2522-7e1e-48ac-a8b0-90c6db50465e1572.jpg b/Data/experiment_images_dir/images_main_repo/001#_6f8a2522-7e1e-48ac-a8b0-90c6db50465e1572.jpg new file mode 100644 index 00000000..f0431892 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/001#_6f8a2522-7e1e-48ac-a8b0-90c6db50465e1572.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/01#_ac9c1579-6afa-4aa7-af80-c039152577b01561.jpg b/Data/experiment_images_dir/images_main_repo/01#_ac9c1579-6afa-4aa7-af80-c039152577b01561.jpg new file mode 100644 index 00000000..ed3c255a Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/01#_ac9c1579-6afa-4aa7-af80-c039152577b01561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/01#_c94a2841-b6fc-452c-855f-101456a339d01561.jpg b/Data/experiment_images_dir/images_main_repo/01#_c94a2841-b6fc-452c-855f-101456a339d01561.jpg new file mode 100644 index 00000000..6887e974 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/01#_c94a2841-b6fc-452c-855f-101456a339d01561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/02#_17efcb46-42b8-4b98-a65f-a90387a9f0781561.jpg 
b/Data/experiment_images_dir/images_main_repo/02#_17efcb46-42b8-4b98-a65f-a90387a9f0781561.jpg new file mode 100644 index 00000000..9082ce65 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/02#_17efcb46-42b8-4b98-a65f-a90387a9f0781561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/02#_5f2b15a8-34f2-4c4e-a822-38c1a1c7f3c1699.jpg b/Data/experiment_images_dir/images_main_repo/02#_5f2b15a8-34f2-4c4e-a822-38c1a1c7f3c1699.jpg new file mode 100644 index 00000000..e9ebd8ab Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/02#_5f2b15a8-34f2-4c4e-a822-38c1a1c7f3c1699.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/02#_ab2c5eb8-274e-4e67-a2a5-8ae954f1e5ac1561.jpg b/Data/experiment_images_dir/images_main_repo/02#_ab2c5eb8-274e-4e67-a2a5-8ae954f1e5ac1561.jpg new file mode 100644 index 00000000..9e6fd4ba Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/02#_ab2c5eb8-274e-4e67-a2a5-8ae954f1e5ac1561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/03#_1d80f799-0e48-4ce8-a097-b99b6a8b38021561.jpg b/Data/experiment_images_dir/images_main_repo/03#_1d80f799-0e48-4ce8-a097-b99b6a8b38021561.jpg new file mode 100644 index 00000000..46d2b55f Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/03#_1d80f799-0e48-4ce8-a097-b99b6a8b38021561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/03#_89a8b830-a6aa-4e10-93d8-d14ddc6d72681562.jpg b/Data/experiment_images_dir/images_main_repo/03#_89a8b830-a6aa-4e10-93d8-d14ddc6d72681562.jpg new file mode 100644 index 00000000..b5a7b0da Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/03#_89a8b830-a6aa-4e10-93d8-d14ddc6d72681562.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/03#_9b225e90-09ea-4f03-a17b-5d71434d5b301561.jpg b/Data/experiment_images_dir/images_main_repo/03#_9b225e90-09ea-4f03-a17b-5d71434d5b301561.jpg new file mode 100644 index 
00000000..cd82ffb1 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/03#_9b225e90-09ea-4f03-a17b-5d71434d5b301561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/03#_b284922a-b5f8-4d02-96e6-fe039dd018471562.jpg b/Data/experiment_images_dir/images_main_repo/03#_b284922a-b5f8-4d02-96e6-fe039dd018471562.jpg new file mode 100644 index 00000000..efd17de8 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/03#_b284922a-b5f8-4d02-96e6-fe039dd018471562.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/03-22 MALI#_8f0ff0bb-dbd0-4030-b447-c36a5282be70950.jpg b/Data/experiment_images_dir/images_main_repo/03-22 MALI#_8f0ff0bb-dbd0-4030-b447-c36a5282be70950.jpg new file mode 100644 index 00000000..f493d2d1 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/03-22 MALI#_8f0ff0bb-dbd0-4030-b447-c36a5282be70950.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/04#_35016661-b902-401c-bfe2-c7c29dc4e9821561.jpg b/Data/experiment_images_dir/images_main_repo/04#_35016661-b902-401c-bfe2-c7c29dc4e9821561.jpg new file mode 100644 index 00000000..5528b4a2 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/04#_35016661-b902-401c-bfe2-c7c29dc4e9821561.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/05#_03f55ed9-61d2-4962-95ac-e7e60f5d8e911567.jpg b/Data/experiment_images_dir/images_main_repo/05#_03f55ed9-61d2-4962-95ac-e7e60f5d8e911567.jpg new file mode 100644 index 00000000..c6da3aec Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/05#_03f55ed9-61d2-4962-95ac-e7e60f5d8e911567.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/1#_492ccd82-f792-4576-b1d0-629531c321b71163.jpg b/Data/experiment_images_dir/images_main_repo/1#_492ccd82-f792-4576-b1d0-629531c321b71163.jpg new file mode 100644 index 00000000..a901cbb1 Binary files /dev/null and 
b/Data/experiment_images_dir/images_main_repo/1#_492ccd82-f792-4576-b1d0-629531c321b71163.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/1#_633716b6-4cc4-4727-a08a-f6d629ff07511095.jpg b/Data/experiment_images_dir/images_main_repo/1#_633716b6-4cc4-4727-a08a-f6d629ff07511095.jpg new file mode 100644 index 00000000..a95adac4 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/1#_633716b6-4cc4-4727-a08a-f6d629ff07511095.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/1#_a0fdb35d-5f7b-420e-b2c4-5c0a30309f4e2151.jpg b/Data/experiment_images_dir/images_main_repo/1#_a0fdb35d-5f7b-420e-b2c4-5c0a30309f4e2151.jpg new file mode 100644 index 00000000..3dd89392 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/1#_a0fdb35d-5f7b-420e-b2c4-5c0a30309f4e2151.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/1#_d8aefc8c-29eb-4e36-9d83-bd15b40a67c7107.jpg b/Data/experiment_images_dir/images_main_repo/1#_d8aefc8c-29eb-4e36-9d83-bd15b40a67c7107.jpg new file mode 100644 index 00000000..7a05ccd7 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/1#_d8aefc8c-29eb-4e36-9d83-bd15b40a67c7107.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/1#_e0bec096-b354-4c3a-8635-cb5955190b6697.jpg b/Data/experiment_images_dir/images_main_repo/1#_e0bec096-b354-4c3a-8635-cb5955190b6697.jpg new file mode 100644 index 00000000..d9c615da Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/1#_e0bec096-b354-4c3a-8635-cb5955190b6697.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/1-Muthu fr#_0361a74c-3623-49e4-9ff8-e066e6c9293d1743.jpg b/Data/experiment_images_dir/images_main_repo/1-Muthu fr#_0361a74c-3623-49e4-9ff8-e066e6c9293d1743.jpg new file mode 100644 index 00000000..03faf6dd Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/1-Muthu fr#_0361a74c-3623-49e4-9ff8-e066e6c9293d1743.jpg differ diff --git 
a/Data/experiment_images_dir/images_main_repo/2#_07326d5f-a904-4c5c-8acd-caacfd23402897.jpg b/Data/experiment_images_dir/images_main_repo/2#_07326d5f-a904-4c5c-8acd-caacfd23402897.jpg new file mode 100644 index 00000000..155dbb5f Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/2#_07326d5f-a904-4c5c-8acd-caacfd23402897.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/2#_53b8c299-12f1-4998-b32e-cf32e3bbade11816.png b/Data/experiment_images_dir/images_main_repo/2#_53b8c299-12f1-4998-b32e-cf32e3bbade11816.png new file mode 100644 index 00000000..96b620e3 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/2#_53b8c299-12f1-4998-b32e-cf32e3bbade11816.png differ diff --git a/Data/experiment_images_dir/images_main_repo/2#_8f8f8954-07e6-4d9e-a24e-3bac440d66b61816.png b/Data/experiment_images_dir/images_main_repo/2#_8f8f8954-07e6-4d9e-a24e-3bac440d66b61816.png new file mode 100644 index 00000000..96b620e3 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/2#_8f8f8954-07e6-4d9e-a24e-3bac440d66b61816.png differ diff --git a/Data/experiment_images_dir/images_main_repo/2#_b7eafba1-f58b-47ed-9ad6-5fc4dbe0fa2b107.jpg b/Data/experiment_images_dir/images_main_repo/2#_b7eafba1-f58b-47ed-9ad6-5fc4dbe0fa2b107.jpg new file mode 100644 index 00000000..de1073a2 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/2#_b7eafba1-f58b-47ed-9ad6-5fc4dbe0fa2b107.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/2#_cbf5ffc6-8f4c-4374-8b51-ae72169a14771095.jpg b/Data/experiment_images_dir/images_main_repo/2#_cbf5ffc6-8f4c-4374-8b51-ae72169a14771095.jpg new file mode 100644 index 00000000..ec9e9629 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/2#_cbf5ffc6-8f4c-4374-8b51-ae72169a14771095.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/3#_db933824-1206-4477-8240-b6271f0e2d61107.jpg 
b/Data/experiment_images_dir/images_main_repo/3#_db933824-1206-4477-8240-b6271f0e2d61107.jpg new file mode 100644 index 00000000..089237c0 Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/3#_db933824-1206-4477-8240-b6271f0e2d61107.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/4#_02a109bb-4a4e-4f65-bac5-695b136d32121095.jpg b/Data/experiment_images_dir/images_main_repo/4#_02a109bb-4a4e-4f65-bac5-695b136d32121095.jpg new file mode 100644 index 00000000..cac7211f Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/4#_02a109bb-4a4e-4f65-bac5-695b136d32121095.jpg differ diff --git a/Data/experiment_images_dir/images_main_repo/4-Manoj F#_8e49060a-e188-45a9-a853-06c96b5c84c71743.jpg b/Data/experiment_images_dir/images_main_repo/4-Manoj F#_8e49060a-e188-45a9-a853-06c96b5c84c71743.jpg new file mode 100644 index 00000000..52ec4dcd Binary files /dev/null and b/Data/experiment_images_dir/images_main_repo/4-Manoj F#_8e49060a-e188-45a9-a853-06c96b5c84c71743.jpg differ diff --git a/Data/experiment_images_dir/test_images_repo/1#_633716b6-4cc4-4727-a08a-f6d629ff07511095.jpg b/Data/experiment_images_dir/test_images_repo/1#_633716b6-4cc4-4727-a08a-f6d629ff07511095.jpg new file mode 100644 index 00000000..a95adac4 Binary files /dev/null and b/Data/experiment_images_dir/test_images_repo/1#_633716b6-4cc4-4727-a08a-f6d629ff07511095.jpg differ diff --git a/KSP_ElintData_Problem1_SolutionDeck.pdf b/KSP_ElintData_Problem1_SolutionDeck.pdf new file mode 100644 index 00000000..570b433f Binary files /dev/null and b/KSP_ElintData_Problem1_SolutionDeck.pdf differ diff --git a/Note.md b/Note.md new file mode 100644 index 00000000..f26fcc66 --- /dev/null +++ b/Note.md @@ -0,0 +1,33 @@ +https://stackoverflow.com/questions/43232813/convert-opencv-image-format-to-pil-image-format + + + +Pillow and OpenCV use different formats of images. So you can't just read an image in Pillow and manipulate it into an OpenCV image. 
+Pillow uses the RGB format as @ZdaR highlighted, and OpenCV uses the BGR format. So, you need a converter to convert from one format to another. + +To convert from PIL image to OpenCV use: + +import cv2 +import numpy as np +from PIL import Image + +pil_image=Image.open("demo2.jpg") # open image using PIL + +# use numpy to convert the pil_image into a numpy array +numpy_image=numpy.array(pil_img) + +# convert to a openCV2 image, notice the COLOR_RGB2BGR which means that +# the color is converted from RGB to BGR format +opencv_image=cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR) +To convert from OpenCV image to PIL image use: + +import cv2 +import numpy as np +from PIL import Image + +opencv_image=cv2.imread("demo2.jpg") # open image using openCV2 + +# convert from openCV2 to PIL. Notice the COLOR_BGR2RGB which means that +# the color is converted from BGR to RGB +color_coverted = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB) +pil_image=Image.fromarray(color_coverted) \ No newline at end of file diff --git a/README.md b/README.md index 578bb9cc..7607abdc 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # ksp-submission This repository is created for Karnataka State Police Hackathon 2023 - submission collection. 
## Team Information -### Team Name - -### Problem Statement - +### Team Name - Elint Data +### Problem Statement - Unified Data Verification diff --git a/Scripts/Services/AwsConnection/__init__.py b/Scripts/Services/AwsConnection/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Scripts/Services/AwsConnection/__pycache__/__init__.cpython-36.pyc b/Scripts/Services/AwsConnection/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 00000000..e307849c Binary files /dev/null and b/Scripts/Services/AwsConnection/__pycache__/__init__.cpython-36.pyc differ diff --git a/Scripts/Services/AwsConnection/__pycache__/__init__.cpython-37.pyc b/Scripts/Services/AwsConnection/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..a36f86e2 Binary files /dev/null and b/Scripts/Services/AwsConnection/__pycache__/__init__.cpython-37.pyc differ diff --git a/Scripts/Services/AwsConnection/__pycache__/aws_s3_bucket_conn.cpython-36.pyc b/Scripts/Services/AwsConnection/__pycache__/aws_s3_bucket_conn.cpython-36.pyc new file mode 100644 index 00000000..140ec228 Binary files /dev/null and b/Scripts/Services/AwsConnection/__pycache__/aws_s3_bucket_conn.cpython-36.pyc differ diff --git a/Scripts/Services/AwsConnection/__pycache__/aws_s3_bucket_conn.cpython-37.pyc b/Scripts/Services/AwsConnection/__pycache__/aws_s3_bucket_conn.cpython-37.pyc new file mode 100644 index 00000000..23584814 Binary files /dev/null and b/Scripts/Services/AwsConnection/__pycache__/aws_s3_bucket_conn.cpython-37.pyc differ diff --git a/Scripts/Services/AwsConnection/aws_s3_bucket_conn.py b/Scripts/Services/AwsConnection/aws_s3_bucket_conn.py new file mode 100644 index 00000000..408be061 --- /dev/null +++ b/Scripts/Services/AwsConnection/aws_s3_bucket_conn.py @@ -0,0 +1,109 @@ +import os +import boto3 +from PIL import Image +from io import BytesIO +import numpy as np +import matplotlib.pyplot as plt +import pathlib +from Constants import const + +from Scripts.Utility import 
utils +from Constants import const + + + +class AWS_S3BucketConnection: + + def __init__(self, server): + # aws_key = utils.configuration["aws_s3_bucket_connection"]["access_key_id"] + # aws_secret = utils.configuration['aws_s3_bucket_connection']['secret_access_key'] + # bucket_name = utils.configuration['aws_s3_bucket_connection']['bucket_name'] + # region = utils.configuration['aws_s3_bucket_connection']['region'] + + self.aws_key = utils.configuration["aws_s3_bucket_connection"][server]["aws_access_key_id"] + self.aws_secret = utils.configuration['aws_s3_bucket_connection'][server]['aws_secret_access_key'] + self.bucket_name = utils.configuration['aws_s3_bucket_connection'][server]['bucket_name'] + self.region_name = utils.configuration['aws_s3_bucket_connection'][server]['region'] + + + def download_single_test_image_from_s3Bucket(self, s3_image_key, dest_dir): + s3_client = boto3.client("s3", aws_access_key_id=self.aws_key, aws_secret_access_key=self.aws_secret) + bucket_name = self.bucket_name + #s3_client = boto3.client('s3') + try: + # s3_image_url = os.path.join(const.aws_s3Bucket_url, s3_image_key) + dest_file_name = os.path.join(dest_dir, s3_image_key) + + # If s3_image_key devoid of image file extension then append '.jpg' file extension to "dest_file_name" before downloading to local directory. + # This appending is required because openCV image resize is possible for valid file image file extension only. + if not pathlib.Path(dest_file_name).suffix in const.image_file_extention: + dest_file_name += ".jpg" + + # s3_image_key = "/".join(s3_image_url.rsplit("/")[3:]) + # if s3_image_key.split(".")[-1] in (const.image_file_extention): + s3_client.download_file(bucket_name, s3_image_key, dest_file_name) + return dest_file_name, True + + except Exception as e: + utils.logger.exception("__Error while downloading from S3 Bucket__" + str(e)) + return False, None + + + + def read_image_from_s3_bucket(self, key): + """Load image file from s3. 
+ + Parameters + ---------- + bucket_name: string + Bucket name + key : string + Path in s3 + + Returns + ------- + np array + Image array + """ + try: + region_name = "ap-south-1" + s3_resource = boto3.resource("s3", aws_access_key_id=self.aws_key, aws_secret_access_key=self.aws_secret, region_name=self.region_name) + bucket = s3_resource.Bucket(self.bucket_name) + img_object = bucket.Object(key) + response = img_object.get() + file_stream = response["Body"] + im = Image.open(file_stream) + + # plt.figure(0) + # plt.imshow(im) + + return np.array(im) + except Exception as e: + utils.logger.exception("--ERROR--: read_image_from_s3_bucket" + str(e)) + + + def write_image_to_s3_bucket(self, img_array, bucket, key): + """Write an image array into S3 bucket + + Parameters + ---------- + bucket: string + Bucket name + key : string + Path in s3 + + Returns + ------- + None + """ + try: + + region_name = "ap-south-1" + s3 = boto3.resource('s3', region_name=region_name) + bucket = s3.Bucket(bucket) + object = bucket.Object(key) + file_stream = BytesIO() + im = Image.fromarray(img_array) + im.save(file_stream, format='jpeg') + except Exception as e: + utils.logger.exception("--ERROR--: write_image_to_s3_bucket" + str(e)) \ No newline at end of file diff --git a/Scripts/Services/FaceRecognitionLibrary/__init__.py b/Scripts/Services/FaceRecognitionLibrary/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Scripts/Services/FaceRecognitionLibrary/__pycache__/__init__.cpython-36.pyc b/Scripts/Services/FaceRecognitionLibrary/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 00000000..9f5dbf20 Binary files /dev/null and b/Scripts/Services/FaceRecognitionLibrary/__pycache__/__init__.cpython-36.pyc differ diff --git a/Scripts/Services/FaceRecognitionLibrary/__pycache__/__init__.cpython-37.pyc b/Scripts/Services/FaceRecognitionLibrary/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..0989f43b Binary files /dev/null and 
b/Scripts/Services/FaceRecognitionLibrary/__pycache__/__init__.cpython-37.pyc differ diff --git a/Scripts/Services/FaceRecognitionLibrary/__pycache__/face_recognition_distance.cpython-36.pyc b/Scripts/Services/FaceRecognitionLibrary/__pycache__/face_recognition_distance.cpython-36.pyc new file mode 100644 index 00000000..52f95c4f Binary files /dev/null and b/Scripts/Services/FaceRecognitionLibrary/__pycache__/face_recognition_distance.cpython-36.pyc differ diff --git a/Scripts/Services/FaceRecognitionLibrary/__pycache__/face_recognition_distance.cpython-37.pyc b/Scripts/Services/FaceRecognitionLibrary/__pycache__/face_recognition_distance.cpython-37.pyc new file mode 100644 index 00000000..f7d45d7e Binary files /dev/null and b/Scripts/Services/FaceRecognitionLibrary/__pycache__/face_recognition_distance.cpython-37.pyc differ diff --git a/Scripts/Services/FaceRecognitionLibrary/face_recognition_distance.py b/Scripts/Services/FaceRecognitionLibrary/face_recognition_distance.py new file mode 100644 index 00000000..2be74e04 --- /dev/null +++ b/Scripts/Services/FaceRecognitionLibrary/face_recognition_distance.py @@ -0,0 +1,73 @@ +import os +from Constants import const +from Scripts.Utility import utils +import face_recognition +import numpy as np + + +class FaceRecognition: + + def __init__(self): + # from face_recognition import load_image_file, face_encodings, face_distance, compare_faces + pass + + def face_enconding(self, img_file_path): + try: + load_img = face_recognition.load_image_file(img_file_path) + img_encoding = face_recognition.face_encodings(load_img) + + if not len(img_encoding)==0: + return img_encoding[0] + + except Exception as e: + utils.logger.exception("__Error FaceRecognition: FaceEncoding__" + str(e)) + + + def face_distance(self, reference_img_file_path, test_img_file_path): + + try: + reference_img_encoding = self.face_enconding(reference_img_file_path) + test_img_encoding = self.face_enconding(test_img_file_path) + + if 
isinstance(reference_img_encoding, np.ndarray) and isinstance(test_img_encoding, np.ndarray): + face_dist = face_recognition.face_distance([reference_img_encoding], test_img_encoding) + is_comparable = face_recognition.compare_faces([reference_img_encoding], test_img_encoding) + + print(is_comparable[0]) + print(face_dist) + + return float(round(face_dist[0], 2)), is_comparable[0] + + except Exception as e: + utils.logger.exception("__Error FaceRecognition: FaceDistance " + str(e)) + + + + def face_comparison(self, reference_img_file_path, test_img_file_path): + + try: + reference_img_encoding = self.face_enconding(reference_img_file_path) + test_img_encoding = self.face_enconding(test_img_file_path) + + is_comparable = face_recognition.compare_faces([reference_img_encoding], test_img_encoding) + print(is_comparable[0]) + + return is_comparable[0] + + except Exception as e: + utils.logger.exception("__Error FaceRecognition: FaceComparison__" + str(e)) + + def face_comparison_encoding(self, list_ref_img_encoding, test_img_encoding): + + try: + # reference_img_encoding = self.face_enconding(reference_img_file_path) + # test_img_encoding = self.face_enconding(test_img_file_path) + + is_comparable_list = face_recognition.compare_faces(list_ref_img_encoding, test_img_encoding) + face_dist_list = face_recognition.face_distance(list_ref_img_encoding, test_img_encoding) + # print(is_comparable[0]) + + return is_comparable_list, face_dist_list + + except Exception as e: + utils.logger.exception("__Error FaceRecognition: FaceComparison__" + str(e)) \ No newline at end of file diff --git a/Scripts/Services/ImageMatchingServices/__init__.py b/Scripts/Services/ImageMatchingServices/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/__init__.cpython-36.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 00000000..440734b0 Binary files /dev/null and 
b/Scripts/Services/ImageMatchingServices/__pycache__/__init__.cpython-36.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/__init__.cpython-37.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..40bb363d Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/__init__.cpython-37.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/fr_utils.cpython-36.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/fr_utils.cpython-36.pyc new file mode 100644 index 00000000..6794a098 Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/fr_utils.cpython-36.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/fr_utils.cpython-37.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/fr_utils.cpython-37.pyc new file mode 100644 index 00000000..5adc852a Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/fr_utils.cpython-37.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/inception_blocks_v2.cpython-36.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/inception_blocks_v2.cpython-36.pyc new file mode 100644 index 00000000..4b517236 Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/inception_blocks_v2.cpython-36.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/inception_blocks_v2.cpython-37.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/inception_blocks_v2.cpython-37.pyc new file mode 100644 index 00000000..409da2a6 Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/inception_blocks_v2.cpython-37.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/main_image_matching_service.cpython-36.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/main_image_matching_service.cpython-36.pyc new file mode 100644 index 
00000000..13031ca4 Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/main_image_matching_service.cpython-36.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/main_image_matching_service.cpython-37.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/main_image_matching_service.cpython-37.pyc new file mode 100644 index 00000000..dff77320 Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/main_image_matching_service.cpython-37.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/preprocess.cpython-36.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/preprocess.cpython-36.pyc new file mode 100644 index 00000000..3128ddff Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/preprocess.cpython-36.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/__pycache__/preprocess.cpython-37.pyc b/Scripts/Services/ImageMatchingServices/__pycache__/preprocess.cpython-37.pyc new file mode 100644 index 00000000..540ea383 Binary files /dev/null and b/Scripts/Services/ImageMatchingServices/__pycache__/preprocess.cpython-37.pyc differ diff --git a/Scripts/Services/ImageMatchingServices/fr_utils.py b/Scripts/Services/ImageMatchingServices/fr_utils.py new file mode 100644 index 00000000..9d7b123b --- /dev/null +++ b/Scripts/Services/ImageMatchingServices/fr_utils.py @@ -0,0 +1,186 @@ +import warnings +warnings.filterwarnings("ignore") +import os +import pickle + +import tensorflow as tf +# from tensorflow.python.keras.backend import _get_session #### New added + +import cv2 +import h5py +import numpy as np +from numpy import genfromtxt +from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate +from keras.models import Model +from keras.layers.normalization import BatchNormalization +from keras.layers.pooling import MaxPooling2D, AveragePooling2D + +from tqdm import tqdm + +from Constants import const +from 
Scripts.Utility import utils + +_FLOATX = "float32" + + +def variable(value, dtype=_FLOATX, name=None): + # with graph.as_default(): + v = tf.Variable(np.asarray(value, dtype=dtype), name=name) + _get_session().run(v.initializer) + + return v + +def shape(x): + return x.get_shape() + +def square(x): + return tf.square(x) + +def zeros(shape, dtype=_FLOATX, name=None): + return variable(np.zeros(shape=shape), dtype=dtype, name=name) + +def concatenate(tensors, axis=-1): + if axis < 0: + axis = axis % len(tensors[0].get_shape()) + + return tf.concat(axis, tensors) + +def LRN2D(x): + return tf.nn.lrn(x, alpha=1e-4, beta=0.75) + + +def conv2d_bn(x, + layer=None, + cv1_out=None, + cv1_filter=(1, 1), + cv1_strides=(1, 1), + cv2_out=None, + cv2_filter=(3, 3), + cv2_strides=(1, 1), + padding=None): + + num = "" if cv2_out == None else "1" + tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, data_format="channels_first", name=layer + "_conv" + num)(x) + tensor = BatchNormalization(axis=1, epsilon=1e-5, name=layer + "_bn" + num)(tensor) + tensor = Activation("relu")(tensor) + + if padding == None: + return tensor + + tensor = ZeroPadding2D(padding=padding, data_format="channels_first")(tensor) + + if cv2_out == None: + return tensor + + tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, data_format="channels_first", name=layer + "_conv" + "2")(tensor) + tensor = BatchNormalization(axis=1, epsilon=1e-5, name=layer + "_bn" + "2")(tensor) + tensor = Activation("relu")(tensor) + + return tensor + + +def load_weights_from_FaceNet(FRmodel): + """ + Load weights from csv files (which are exported from Openface torch model) + :param FRmodel: + :return: + """ + try: + weights = const.WEIGHTS + + if not os.path.exists("./Assets/saved_weights_dict.pkl"): + weights_dict = load_weights() + pickled_file = open("./Assets/saved_weights_dict.pkl", "wb") + pickle.dump(weights_dict, pickled_file, protocol=pickle.HIGHEST_PROTOCOL) + pickled_file.close() + elif 
os.path.exists("./Assets/saved_weights_dict.pkl"): + unpickled_file = open("./Assets/saved_weights_dict.pkl", "rb") + weights_dict = pickle.load(unpickled_file) + else: + raise FileNotFoundError + + # Set layer weights of the model: + for name in weights: + if FRmodel.get_layer(name) != None: + FRmodel.get_layer(name).set_weights(weights_dict[name]) + elif FRmodel.get_layer(name) != None: + FRmodel.get_layer(name).set_weights(weights_dict[name]) + + except Exception as e: + utils.logger.exception("--ERROR-- : load_weights_from_FaceNet ->") + + +def load_weights(): + """ + Set weights path + :return: + """ + dir_path = "./Assets/weights" + file_names = filter(lambda f: not f.startswith("."), os.listdir(dir_path)) + path = dict() + weights_dict = dict() + + for n in file_names: + path[n.replace(".csv", "")] = os.path.join(dir_path, n) + + for name in tqdm(const.WEIGHTS): + if "conv" in name: + conv_w = genfromtxt(path[name + "_w"], delimiter=",", dtype=None) + conv_w = np.reshape(conv_w, const.conv_shape[name]) + conv_w = np.transpose(conv_w, (2, 3, 1, 0)) + conv_b = genfromtxt(path[name + "_b"], delimiter=",", dtype=None) + + weights_dict[name] = [conv_w, conv_b] + + elif "bn" in name: + bn_w = genfromtxt(path[name + "_w"], delimiter=",", dtype=None) + bn_b = genfromtxt(path[name + "_b"], delimiter=",", dtype=None) + bn_m = genfromtxt(path[name + "_m"], delimiter=",", dtype=None) + bn_v = genfromtxt(path[name + "_v"], delimiter=",", dtype=None) + + weights_dict[name] = [bn_w, bn_b, bn_m, bn_v] + + elif "dense" in name: + dense_w = genfromtxt(dir_path + "/dense_w.csv", delimiter=",", dtype=None) + dense_w = np.reshape(dense_w, (128, 736)) + dense_w = np.transpose(dense_w, (1, 0)) + dense_b = genfromtxt(dir_path + "/dense_b.csv", delimiter=",", dtype=None) + + weights_dict[name] = [dense_w, dense_b] + + return weights_dict + + + +def load_dataset(): + #train_dataset = h5py.File("") + pass + +def image_to_encoding_from_imagepath(image_path, model): + try: + img1 = 
cv2.imread(image_path, 1) + size = (96, 96) + img = img1[..., ::-1] + img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12) + x_train = np.array([img]) + embedding = model.predict_on_batch(x_train) + + return embedding + + except Exception as e: + utils.logger.exception("--ERROR-- : image_to_encoding ->" + str(e)) + +def image_to_encoding_from_numyArray(numpy_image, model): + try: + # img1 = cv2.imread(image_path, 1) + size = (96, 96) + opencv_image = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR) # TODO: To convert from PIL image to OpenCV Pillow and OpenCV use different formats of images. So you can't just read an image in Pillow and manipulate it into an OpenCV image. Pillow uses the RGB format, and OpenCV uses the BGR format. So, you need a converter to convert from one format to another. + opencv_image = cv2.resize(opencv_image, size) + img = opencv_image[..., ::-1] + img = np.around(np.transpose(img, (2, 0, 1)) / 255.0, decimals=12) + x_train = np.array([img]) + embedding = model.predict_on_batch(x_train) + + return embedding + except Exception as e: + utils.logger.exception("--ERROR-- : image_to_encoding ->" + str(e)) \ No newline at end of file diff --git a/Scripts/Services/ImageMatchingServices/inception_blocks_v2.py b/Scripts/Services/ImageMatchingServices/inception_blocks_v2.py new file mode 100644 index 00000000..1ab2d4b7 --- /dev/null +++ b/Scripts/Services/ImageMatchingServices/inception_blocks_v2.py @@ -0,0 +1,279 @@ +import warnings +warnings.filterwarnings("ignore") + +from keras import backend as K +from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate +from keras.models import Model +from keras.layers.normalization import BatchNormalization +from keras.layers.pooling import MaxPooling2D, AveragePooling2D +from keras.layers.core import Lambda, Flatten, Dense + +from Scripts.Services.ImageMatchingServices import fr_utils + +from Scripts.Utility import utils + + +def inception_block_1a(X): + """ + Implementation 
of an inception block + """ + + X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X) + X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3) + X_3x3 = Activation('relu')(X_3x3) + X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3) + X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3) + X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3) + X_3x3 = Activation('relu')(X_3x3) + + X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X) + X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5) + X_5x5 = Activation('relu')(X_5x5) + X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5) + X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5) + X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5) + X_5x5 = Activation('relu')(X_5x5) + + X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X) + X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool) + X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool) + X_pool = Activation('relu')(X_pool) + X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool) + + X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X) + X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1) + X_1x1 = Activation('relu')(X_1x1) + + # CONCAT + inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1) + + return inception + +def inception_block_1b(X): + X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3b_3x3_conv1')(X) + X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, 
name='inception_3b_3x3_bn1')(X_3x3) + X_3x3 = Activation('relu')(X_3x3) + X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3) + X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3b_3x3_conv2')(X_3x3) + X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_3x3_bn2')(X_3x3) + X_3x3 = Activation('relu')(X_3x3) + + X_5x5 = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3b_5x5_conv1')(X) + X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn1')(X_5x5) + X_5x5 = Activation('relu')(X_5x5) + X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5) + X_5x5 = Conv2D(64, (5, 5), data_format='channels_first', name='inception_3b_5x5_conv2')(X_5x5) + X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_5x5_bn2')(X_5x5) + X_5x5 = Activation('relu')(X_5x5) + + X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X) + X_pool = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_pool_conv')(X_pool) + X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_pool_bn')(X_pool) + X_pool = Activation('relu')(X_pool) + X_pool = ZeroPadding2D(padding=(4, 4), data_format='channels_first')(X_pool) + + X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3b_1x1_conv')(X) + X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3b_1x1_bn')(X_1x1) + X_1x1 = Activation('relu')(X_1x1) + + inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1) + + return inception + +def inception_block_1c(X): + X_3x3 = fr_utils.conv2d_bn(X, + layer='inception_3c_3x3', + cv1_out=128, + cv1_filter=(1, 1), + cv2_out=256, + cv2_filter=(3, 3), + cv2_strides=(2, 2), + padding=(1, 1)) + + X_5x5 = fr_utils.conv2d_bn(X, + layer='inception_3c_5x5', + cv1_out=32, + cv1_filter=(1, 1), + cv2_out=64, + cv2_filter=(5, 5), + cv2_strides=(2, 2), + padding=(2, 2)) + + X_pool = 
MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X) + X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool) + + inception = concatenate([X_3x3, X_5x5, X_pool], axis=1) + + return inception + +def inception_block_2a(X): + X_3x3 = fr_utils.conv2d_bn(X, + layer='inception_4a_3x3', + cv1_out=96, + cv1_filter=(1, 1), + cv2_out=192, + cv2_filter=(3, 3), + cv2_strides=(1, 1), + padding=(1, 1)) + X_5x5 = fr_utils.conv2d_bn(X, + layer='inception_4a_5x5', + cv1_out=32, + cv1_filter=(1, 1), + cv2_out=64, + cv2_filter=(5, 5), + cv2_strides=(1, 1), + padding=(2, 2)) + + X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X) + X_pool = fr_utils.conv2d_bn(X_pool, + layer='inception_4a_pool', + cv1_out=128, + cv1_filter=(1, 1), + padding=(2, 2)) + X_1x1 = fr_utils.conv2d_bn(X, + layer='inception_4a_1x1', + cv1_out=256, + cv1_filter=(1, 1)) + inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1) + + return inception + +def inception_block_2b(X): + #inception4e + X_3x3 = fr_utils.conv2d_bn(X, + layer='inception_4e_3x3', + cv1_out=160, + cv1_filter=(1, 1), + cv2_out=256, + cv2_filter=(3, 3), + cv2_strides=(2, 2), + padding=(1, 1)) + X_5x5 = fr_utils.conv2d_bn(X, + layer='inception_4e_5x5', + cv1_out=64, + cv1_filter=(1, 1), + cv2_out=128, + cv2_filter=(5, 5), + cv2_strides=(2, 2), + padding=(2, 2)) + + X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X) + X_pool = ZeroPadding2D(padding=((0, 1), (0, 1)), data_format='channels_first')(X_pool) + + inception = concatenate([X_3x3, X_5x5, X_pool], axis=1) + + return inception + +def inception_block_3a(X): + X_3x3 = fr_utils.conv2d_bn(X, + layer='inception_5a_3x3', + cv1_out=96, + cv1_filter=(1, 1), + cv2_out=384, + cv2_filter=(3, 3), + cv2_strides=(1, 1), + padding=(1, 1)) + X_pool = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), data_format='channels_first')(X) + X_pool = fr_utils.conv2d_bn(X_pool, + 
layer='inception_5a_pool', + cv1_out=96, + cv1_filter=(1, 1), + padding=(1, 1)) + X_1x1 = fr_utils.conv2d_bn(X, + layer='inception_5a_1x1', + cv1_out=256, + cv1_filter=(1, 1)) + + inception = concatenate([X_3x3, X_pool, X_1x1], axis=1) + + return inception + +def inception_block_3b(X): + X_3x3 = fr_utils.conv2d_bn(X, + layer='inception_5b_3x3', + cv1_out=96, + cv1_filter=(1, 1), + cv2_out=384, + cv2_filter=(3, 3), + cv2_strides=(1, 1), + padding=(1, 1)) + X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X) + X_pool = fr_utils.conv2d_bn(X_pool, + layer='inception_5b_pool', + cv1_out=96, + cv1_filter=(1, 1)) + X_pool = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_pool) + + X_1x1 = fr_utils.conv2d_bn(X, + layer='inception_5b_1x1', + cv1_out=256, + cv1_filter=(1, 1)) + inception = concatenate([X_3x3, X_pool, X_1x1], axis=1) + + return inception + +def faceRecoModel(input_shape): + # Clear previous session + K.clear_session() # ValueError: Fetch argument cannot be interpreted as a Tensor. (Tensor Tensor("conv1/kernel:0", shape=(7, 7, 3, 64), dtype=float32_ref) is not an element of this graph.) 
+ + # Define the input as a tensor with shape input_shape + X_input = Input(input_shape) + + + try: + # Zero-Padding + X = ZeroPadding2D((3, 3))(X_input) + + # First Block + X = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(X) + X = BatchNormalization(axis=1, name='bn1')(X) + X = Activation('relu')(X) + + # Zero-Padding + MAXPOOL + X = ZeroPadding2D((1, 1))(X) + X = MaxPooling2D((3, 3), strides=2)(X) + + # Second Block + X = Conv2D(64, (1, 1), strides=(1, 1), name='conv2')(X) + X = BatchNormalization(axis=1, epsilon=0.00001, name='bn2')(X) + X = Activation('relu')(X) + + # Zero-Padding + MAXPOOL + X = ZeroPadding2D((1, 1))(X) + + # Third Block + X = Conv2D(192, (3, 3), strides=(1, 1), name='conv3')(X) + X = BatchNormalization(axis=1, epsilon=0.00001, name='bn3')(X) + X = Activation('relu')(X) + + # Zero-Padding + MAXPOOL + X = ZeroPadding2D((1, 1))(X) + X = MaxPooling2D(pool_size=3, strides=2)(X) + + # Inception 1: a/b/c + X = inception_block_1a(X) + X = inception_block_1b(X) + X = inception_block_1c(X) + + # Inception 2: a/b + X = inception_block_2a(X) + X = inception_block_2b(X) + + # Inception 3: a/b + X = inception_block_3a(X) + X = inception_block_3b(X) + + # Top layer + X = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), data_format='channels_first')(X) + X = Flatten()(X) + X = Dense(128, name='dense_layer')(X) + + # L2 normalization + X = Lambda(lambda x: K.l2_normalize(x, axis=1))(X) + + # Create model instance + model = Model(inputs=X_input, outputs=X, name='FaceRecoModel') + + return model + except Exception as e: + utils.logger.exception("----Error occurred in 'inception_blocks_v2.faceRecoModel' ----->>>>" + str(e)) \ No newline at end of file diff --git a/Scripts/Services/ImageMatchingServices/main_image_matching_service.py b/Scripts/Services/ImageMatchingServices/main_image_matching_service.py new file mode 100644 index 00000000..cdcca92a --- /dev/null +++ b/Scripts/Services/ImageMatchingServices/main_image_matching_service.py @@ -0,0 +1,261 @@ 
+# from keras.models import Sequential +# from keras.layers import Conv2D, ZeroPadding2D, Activation, Input, Concatenate +# from keras.models import Model +# from keras.layers.normalization import BatchNormalization +# from keras.layers.pooling import MaxPooling2D, AveragePooling2D +# from keras.layers.merge import Concatenate +# from keras.layers.core import Lambda, Flatten, Dense +# from keras.initializers import glorot_uniform +# from keras.engine.topology import Layer +# import random +# import shutil +# from numpy import genfromtxt + + +###################### ----------------------------------------ACTIVE LIBRARIES -----------------------------------################################################################### +import os +import warnings +warnings.filterwarnings("ignore") +import pickle +import numpy as np +import tensorflow as tf +from keras import backend as K +import keras + +K.set_image_data_format("channels_first") +np.set_printoptions(threshold=2**31-1) + +from Scripts.Services.ImageMatchingServices import fr_utils +from Scripts.Services.ImageMatchingServices import inception_blocks_v2 +from Scripts.Services.ImageMatchingServices import preprocess +from Constants import const +from Scripts.Utility import utils +from Scripts.Services.MongoConnection.mongo_connection import MongoConn + + +from pprint import pprint + +class Main: + + # Main function:: + def image_matching_main_service_method(self, case, user_data_json, server): + try: + # with graph.as_default(): + print("(a) Loading Face Recognition Model ...") + # FRModel = inception_blocks_v2.faceRecoModel(input_shape=(3, 96, 96)) + # print("(b) Compiling Face Recognition Model ...") + # FRModel.compile(optimizer='adam', loss=self.triplet_loss, metrics=['accuracy']) + # print("(c) Loading Weights for Face Recognition Model ...") + # fr_utils.load_weights_from_FaceNet(FRModel) + + K.clear_session() # ValueError: Fetch argument cannot be interpreted as a Tensor. 
(Tensor Tensor("conv1/kernel:0", shape=(7, 7, 3, 64), dtype=float32_ref) is not an element of this graph.) + g = tf.Graph() + with g.as_default(): + FRModel = keras.models.load_model("./Assets/SavedModels/saved_FRmodel.hdf5", compile=False) + # preprocess.preprocess_image_data(location=const.match_with_profile_images_folder) + print("Generating Database of Available Images") + mongo_conn_obj = MongoConn(server=server) + + + if case == 'app1': + + # Preprocessing the profile images uploaded to the size = (96,96) + preprocess.preprocess_image_data(img_file_path=user_data_json['profile_image_file_path']) + + # To vectorize the image from image file path. + profile_image_embedding = self.get_img_embedding_from_image_location(img_file_path=user_data_json['profile_image_file_path'], model=FRModel) + + return profile_image_embedding + + + # preference_img_embeddings_list = mongo_conn_obj.find_preferenceImageVector_from_table2(user_data_json=user_data_json) + # + # dist_dict_list1 = [] + # for preference in preference_img_embeddings_list: + # d = {} + # dist = self.find_distance_from_img_embeddings(img_embedding1=preference["preference_img_embedding"], img_embedding2=profile_image_embedding, model=FRModel) + # d['dist'] = float(dist) + # d['preference_gender'] = preference['preference_gender'] + # d['preference_img_s3key'] = preference['preference_img_s3key'] + # + # dist_dict_list1.append(d) + # + # # To calculate Similarity score. + # for dic_i in dist_dict_list1: + # similarity_score = 1.0 / (1.0 + dic_i['dist']) + # dic_i['match_percentage'] = similarity_score * 100.0 + # dic_i['match_percentage'] = round(dic_i['match_percentage'], 2) # Rounding off the percentage decimal value (default=2) + # dic_i['match_dist'] = float(dic_i['dist']) + # dic_i.pop('dist', None) + + + if case == 'app2': + + # Getting all the profile images vectors (embeddings) from the Table#1. 
+ profile_img_embeddings_list = mongo_conn_obj.find_profileImageVector_from_table1(user_data_json=user_data_json) + + # Preprocessing the preference images uploaded to the size = (96,96) + preprocess.preprocess_image_data(img_file_path=user_data_json['preference_image_file_path']) + + # # To vectorize the image from Numpy Array. + # preference_image_embedding = self.get_img_embedding_from_NumpyArray(img_array=user_data_json["preference_img_nparray"], model=FRModel) + + # To vectorize the image from image file path. + preference_image_embedding = self.get_img_embedding_from_image_location(img_file_path=user_data_json['preference_image_file_path'], model=FRModel) + + dist_dict_list2 = [] + for profile in profile_img_embeddings_list: + d = {} + dist = self.find_distance_from_img_embeddings(img_embedding1=profile['profile_image_embedding'], img_embedding2=preference_image_embedding) + d['dist'] = float(dist) + d['match_user_id'] = profile['user_id'] + d['match_gender'] = profile['gender'] + # d['match_media_id'] = profile['media_id'] + # d['profile_img_s3key'] = profile['profile_img_s3key'] + + dist_dict_list2.append(d) + + # keys_tuple = self.key_with_max_and_min_val(dist_dict) + # max_dist = (max(dist_dict_list2, key=lambda x: x['dist']))['dist'] + # min_dist = (min(dist_dict_list2, key=lambda x: x['dist']))['dist'] + + + for dic_i in dist_dict_list2: + similarity_score = 1.0 / (1.0 + dic_i['dist']) + dic_i['match_percentage'] = similarity_score * 100.0 + dic_i['match_percentage'] = round(dic_i['match_percentage'], 2) # Rounding off the percentage decimal value (default=2) + dic_i['match_dist'] = float(dic_i['dist']) + dic_i.pop('dist', None) + + # pprint(dist_dict_list) + return dist_dict_list2, preference_image_embedding + + except Exception as e: + utils.logger.exception("__ERROR__ ->" + str(e)) + + + + # Geometrical Distance between Images + def find_distance_from_img_embeddings(self, img_embedding1, img_embedding2): + try: + # encoding = 
fr_utils.image_to_encoding(image_path, model) + dist = np.linalg.norm(img_embedding2 - img_embedding1) + + return dist + except Exception as e: + utils.logger.exception("--Error: Geometrical Distance between Images ->" + str(e)) + + + + # Preprocess Images in a location/directory + def get_img_embedding_from_NumpyArray(self, img_array, model): + ''' + Input: numpy array. + Output: image embedding + ''' + try: + + img_embedding = fr_utils.image_to_encoding_from_numyArray(numpy_image=img_array, model=model) + + return img_embedding + except Exception as e: + utils.logger.exception("--Error: Preprocess Images in a location/directory ->" + str(e)) + + + def get_img_embedding_from_image_location(self, img_file_path, model): + ''' + Input: file location/path + Output: image embedding + ''' + try: + + img_embedding = fr_utils.image_to_encoding_from_imagepath(image_path=img_file_path, model=model) + + return img_embedding + except Exception as e: + utils.logger.exception("--Error: Preprocess Images in a location/directory ->" + str(e)) + + + def key_with_max_and_min_val(self, dic): + try: + """ a) create a list of the dict's keys and values; + b) return the key with the max value""" + v = list(dic.values()) + k = list(dic.keys()) + + return k[v.index(max(v))], k[v.index(min(v))] + except Exception as e: + utils.logger.exception("--Error: key_with_max_and_min_val ->" + str(e)) + + + # Triplet Loss Function + def triplet_loss(self, y_true, y_pred, alpha=0.2): + try: + anchor = y_pred[0] + positive = y_pred[1] + negative = y_pred[2] + pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1) + neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1) + basic_loss = pos_dist - neg_dist + alpha + loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0)) + + return loss + except Exception as e: + utils.logger.exception("--ERROR: triple_loss " + str(e)) + + + + + + +#-------------------------------- Having ImagePath as parameter for vectorizing images and finding 
distances--------------- + + # Preprocess Images in a location/directory + def create_datadict(self, database_location, model): + try: + data_dict = dict() + + for celeb in os.listdir(database_location): + data_dict.update({celeb: fr_utils.image_to_encoding_from_imagepath(image_path=os.path.join(database_location, celeb), model=model)}) + + return data_dict + except Exception as e: + utils.logger.exception("--Error: Preprocess Images in a location/directory ->" + str(e)) + + + def find_image_distances(self, image_location, database, model): + try: + dist_dict = {} + for id in database.keys(): + dist = self.find_distance(image_path=image_location, identity=id, database=database, model=model) + dist_dict.update({id: dist}) + + return dist_dict + except Exception as e: + utils.logger.exception("--Error: Find Image Distances ->" + str(e)) + + # Geometrical Distance between Images + def find_distance(self, image_path, identity, database, model): + try: + encoding = fr_utils.image_to_encoding_from_imagepath(image_path, model) + dist = np.linalg.norm(encoding - database[identity]) + + return dist + except Exception as e: + utils.logger.exception("--Error: Geometrical Distance between Images ->" + str(e)) + + + + # def display_image(self, image_loc, name): + # try: + # fig, axes = plt.subplots(1, 2, figsize=(18, 5)) + # fig.subplots_adjust(hspace=0.5, wspace=0.5) + # for i, ax in enumerate(axes.flat): + # ax.imshow(cv2.cvtColor(cv2.imread(image_loc[i]), cv2.COLOR_BGR2RGB)) + # ax.set_xlabel(name[i]) + # ax.set_xticks([]) + # ax.set_yticks([]) + # plt.show() + # except Exception as e: + # utils.logger.exception("--Error: Display Images" + str(e)) + diff --git a/Scripts/Services/ImageMatchingServices/preprocess.py b/Scripts/Services/ImageMatchingServices/preprocess.py new file mode 100644 index 00000000..46984971 --- /dev/null +++ b/Scripts/Services/ImageMatchingServices/preprocess.py @@ -0,0 +1,21 @@ +import os +import cv2 +import pathlib +from Constants import const + 
+from Scripts.Utility import utils + +def preprocess_image_data(img_file_path): + try: + # img_file_path = os.path.join(location, file) + size = (96, 96) + img = cv2.resize(cv2.imread(img_file_path), size) + cv2.imwrite(img_file_path, img) + + except Exception as e: + utils.logger.exception("--Preprocessing Error--" + str(e)) + + + + + diff --git a/Scripts/Services/MongoConnection/__init__.py b/Scripts/Services/MongoConnection/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/Scripts/Services/MongoConnection/__pycache__/__init__.cpython-36.pyc b/Scripts/Services/MongoConnection/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 00000000..f7068610 Binary files /dev/null and b/Scripts/Services/MongoConnection/__pycache__/__init__.cpython-36.pyc differ diff --git a/Scripts/Services/MongoConnection/__pycache__/__init__.cpython-37.pyc b/Scripts/Services/MongoConnection/__pycache__/__init__.cpython-37.pyc new file mode 100644 index 00000000..aea0542f Binary files /dev/null and b/Scripts/Services/MongoConnection/__pycache__/__init__.cpython-37.pyc differ diff --git a/Scripts/Services/MongoConnection/__pycache__/mongo_connection.cpython-36.pyc b/Scripts/Services/MongoConnection/__pycache__/mongo_connection.cpython-36.pyc new file mode 100644 index 00000000..c87366b5 Binary files /dev/null and b/Scripts/Services/MongoConnection/__pycache__/mongo_connection.cpython-36.pyc differ diff --git a/Scripts/Services/MongoConnection/__pycache__/mongo_connection.cpython-37.pyc b/Scripts/Services/MongoConnection/__pycache__/mongo_connection.cpython-37.pyc new file mode 100644 index 00000000..a29ecdd9 Binary files /dev/null and b/Scripts/Services/MongoConnection/__pycache__/mongo_connection.cpython-37.pyc differ diff --git a/Scripts/Services/MongoConnection/mongo_connection.py b/Scripts/Services/MongoConnection/mongo_connection.py new file mode 100644 index 00000000..0e5f4e2e --- /dev/null +++ b/Scripts/Services/MongoConnection/mongo_connection.py @@ 
-0,0 +1,588 @@ +import pandas as pd +from pymongo import MongoClient +from bson.binary import Binary +import pickle +from pprint import pprint +from Constants import const +from Scripts.Utility import utils +import numpy as np + + + +class MongoConn: + def __init__(self, server): + # self.mongo_atlas_srv = utils.configuration["mongodb_credentials"]["mongodb_url_production"] + # self.client = MongoClient(self.mongo_atlas_srv) + # self.db = self.client[utils.configuration["mongodb_credentials"]["database_production"]] + + self.mongo_atlas_srv = utils.configuration["mongodb_credentials"]["mongodb_url"] + self.client = MongoClient(self.mongo_atlas_srv) + self.db = self.client[utils.configuration["mongodb_credentials"]["database"]] + +# *********************************************************************************************************************************************************************************************** +# *********************************************************************************************************************************************************************************************** +# *********************************************************************************************************************************************************************************************** +# *********************************************************************************************************************************************************************************************** +# *********************************************************************************************************************************************************************************************** + + """ + INSERTING / UPDATING INTO MONGO DATABASE + + """ + #------------------------------------------- Inserting Documents into Table_1 -------------------------------------- + + def insert_profileImageVector_into_Table_1(self, update_profile_table1): + ''' + Insert Binary numpy 
array of profile images along with other data into Table_1. + # ConnectionErrorConvert numpy array to Binary, store record in mongodb + ''' + try: + update_profile_table1['profile_image_embedding'] = Binary(pickle.dumps(update_profile_table1['profile_image_embedding'], protocol=2), subtype=128) + + self.db[const.table1_profileVectorizedImages].update_one(filter={ 'user_id': update_profile_table1['user_id'], + 'profile_image_s3key': update_profile_table1['profile_image_s3key'], + 'type': update_profile_table1['type'], + 'is_dp': update_profile_table1['is_dp'], + 'gender': update_profile_table1['gender'] + }, + update={"$set": {'user_id': update_profile_table1['user_id'], + 'profile_image_s3key': update_profile_table1['profile_image_s3key'], + 'type': update_profile_table1['type'], + 'is_dp': update_profile_table1['is_dp'], + 'gender': update_profile_table1['gender'], + 'profile_image_embedding': update_profile_table1['profile_image_embedding']} + }, + upsert=True) + + return {"status" : 1, "message": const.table1_profileVectorizedImages + " is updated"} + + except Exception as e: + utils.logger.error("Exception occurred while inserting image vectors of profile image into Table_1 : " + str(e)) + return {"status": -1, "message": const.table1_profileVectorizedImages + " is NOT updated"} + + # ------------------------------------------- Inserting Documents into table_2 ------------------------------------- + + def push_newProfileImgVector_into_matchedUsersArray_table2(self, push_profile_table2): + ''' + Insert matched users list with percentage match along with other details into Table_2. + This is updated/pushed only when a user adds a new profile images(whether DP or not DP) or a new user sign up and then add his profile image(s). + Idea is to find match of this "new profile image" w.r.t. all the "preference_images" in Table#2, calculate distances and append into the list. 
+ ``` + for item in Table#2 : + if ( gender(item[user_id] ) != gender(new_image) && (gender(item[preference_image] ) == gender(new_profile_image)) ): + dist = CalculateDistance( item[preference_image_vector], new_profile_image_vector ) + Table#2[item].add( dist, new_profile_image ) + ``` + In this table we have 'user_id' and 'preference_image_id' are primary key. + ''' + try: + # push_data_matched_users_array_table2['preference_image_embedding'] = Binary(pickle.dumps(push_data_matched_users_array_table2['preference_image_embedding'], protocol=2), subtype=128) + + for item in self.db[const.table2_preferencesBasedMatchedProfiles].find(): + print(item) + item_gender = list(self.db[const.table1_profileVectorizedImages].find({"user_id": item["user_id"]}))[0]["gender"] + + # if ("sexual_orientation == "Straight") + if item_gender != push_profile_table2["gender"]: + img_embedding2 = pickle.loads(item["preference_image_embedding"]) + img_embedding1 = pickle.loads(push_profile_table2["profile_image_embedding"]) + + dist = np.linalg.norm(img_embedding2 - img_embedding1) # Geometrical Distance between Images + dist = float(dist) # Converting 'numpy.float32' datatype into 'float' + similarity_score = (1.0 / (1.0 + dist)) * 100.0 # Percentage similarity score + similarity_score = round(similarity_score, 2) # Rounding off the percentage decimal value (default=2) + + + self.db[const.table2_preferencesBasedMatchedProfiles].update_one( + filter={'user_id': item['user_id'], + 'preference_image_s3key': item['preference_image_s3key'], + }, + update={"$addToSet": {'matched_users': + {'match_user_id': push_profile_table2["user_id"], + 'match_gender': push_profile_table2["gender"], + 'match_img_s3key': push_profile_table2['profile_image_s3key'], + 'match_percentage': similarity_score, + 'match_dist': dist, + } + } + }, + upsert=True) + # + # # self.db[const.table2_preferencesBasedMatchedProfiles].replace_one(filter={'user_id': record_for_table2['user_id'], + # # 
'preference_media_url_objKey': record_for_table2['preference_media_url_objKey'], + # # }, + # # + # # + # # update={"$set": {'user_id': record_for_table2['user_id'], + # # 'preference_media_url_objKey': record_for_table2['preference_media_url_objKey'], + # # 'matched_users': record_for_table2['matched_users'] + # # } + # # }, + # # upsert=True) + # + return {"status": 1, "message": const.table2_preferencesBasedMatchedProfiles + " is pushed for each user "} + + except Exception as e: + utils.logger.error("Exception occurred while inserting %age matched users into Table_2 : " + str(e)) + return {"status": -1, "message": const.table2_preferencesBasedMatchedProfiles + " is NOT updated"} + + + + + + # ------------------------------------------- Inserting Documents into table_2 ------------------------------------- + + def insert_matched_users_with_percentage_table2(self, update_data_table2): + ''' + Insert matched users list with percentage match along with other details into Table_2. + In this table we have 'user_id' and 'preference_image_id' are primary key. 
+ ''' + try: + update_data_table2['preference_image_embedding'] = Binary(pickle.dumps(update_data_table2['preference_image_embedding'], protocol=2), subtype=128) + + self.db[const.table2_preferencesBasedMatchedProfiles].update_one(filter={'user_id': update_data_table2['user_id'], + 'preference_image_s3key': update_data_table2['preference_image_s3key'], + }, + update={"$set": + { + 'user_id': update_data_table2['user_id'], + 'gender': update_data_table2['gender'], + 'sexual_orientation': update_data_table2['sex_orientation'], + 'preference_image_s3key': update_data_table2['preference_image_s3key'], + 'preference_image_embedding': update_data_table2['preference_image_embedding'], + 'matched_users': update_data_table2['matched_users'] + } + }, + upsert=True) + + + return {"status": 1, "message": const.table2_preferencesBasedMatchedProfiles + " is updated"} + + except Exception as e: + utils.logger.error("Exception occurred while inserting %age matched users into Table_2 : " + str(e)) + return {"status": -1, "message": const.table2_preferencesBasedMatchedProfiles + " is NOT updated"} + + + + + + + + + + +#*********************************************************************************************************************************************************************************************** +#*********************************************************************************************************************************************************************************************** +#*********************************************************************************************************************************************************************************************** +#*********************************************************************************************************************************************************************************************** 
+#*********************************************************************************************************************************************************************************************** + + """ + SEARCHING FROM MONGO DATABASE + + """ + +#------------------------------------------------- SEARCHING FROM MONGODB COLLECTIONS -------------------------------------------------------------------------------------------------------------- + + # ------------------------------------------- Searching profiles with right query from table_1 --------------------- + def find_profileImageVector_from_table1(self, user_data_json): + ''' + Searching profiles with right query from table_1 + :param user_data_json: + :return: profile_image_embeddings_list + ''' + mongo_query_table1 = { + 'type': 'profile', + 'is_dp': True + } + + if user_data_json["gender"] == 'Male': + mongo_query_table1['gender'] = 'Female' + + elif user_data_json["gender"] == 'Female': + mongo_query_table1['gender'] = 'Male' + + else: + utils.logger.error("Query is not formatted because 'user_gender' is not entered--") + return "Please enter correct information" + + + profiles_list_from_mongodb = list(self.db[const.table1_profileVectorizedImages].find(mongo_query_table1)) + profile_image_embeddings_list = [] + + for i in profiles_list_from_mongodb: + # print(i.keys()) + i['profile_image_embedding'] = pickle.loads(i['profile_image_embedding']) + profile_image_embeddings_list.append(i) + + return profile_image_embeddings_list + + + + # ------------------------------------------- Searching users with uploaded preference images from table_2 --------- + + def find_preferenceImageVector_from_table2(self, user_data_json): + ''' + Searching All preference image_vectors from table_2 to push new profile image matching into matched_users (array) into table2. 
+ :param user_data_json: + :return: list of image_vectors + ''' + + mongo_query_table2 = { + 'preference_gender': user_data_json["preference_gender"] + } + + preference_list_from_mongodb = list(self.db[const.table2_preferencesBasedMatchedProfiles].find(mongo_query_table2)) + preference_image_embeddings_list = [] + + for i in preference_list_from_mongodb: + # print(i.keys()) + i['preference_image_embedding'] = pickle.loads(i['preference_image_embedding']) + preference_image_embeddings_list.append(i) + + return preference_image_embeddings_list + + + # ------------------------------------------- Searching matched users from table_2 --------------------------------- + def find_matched_users_from_table2(self, user_id, match_percentage, preference_image_s3key): + ''' + Searching matched users from table_2 + :param user_id: + :param match_percentage: + :param preference_image_s3key: + :return: + ''' + result = [] + matched_users_list = list(self.db[const.table2_preferencesBasedMatchedProfiles].find({"user_id": user_id, "preference_image_s3key": preference_image_s3key})) + + if len(matched_users_list) == 0: + pass + else: + for i in matched_users_list: + for j in i["matched_users"]: + if j["match_percentage"] >= match_percentage: + result.append(j) + + result = sorted(result, key=lambda k: k["match_percentage"], reverse=True) + + return result + + + def find_default_users_from_table2(self, user_id, match_percentage): + ''' + Searching matched users from table_2 when + :param user_id: + :param match_percentage: + :param preference_image_s3key: + :return: + ''' + result = [] + matched_users_list = list(self.db[const.table2_preferencesBasedMatchedProfiles].find( {"user_id": user_id} )) + + if len(matched_users_list) == 0: + pass + else: + for i in matched_users_list: + for j in i["matched_users"]: + if j["match_percentage"] >= match_percentage: + result.append(j) + + result = sorted(result, key=lambda k: k["match_percentage"], reverse=True) + + return result + + + # def 
find_matched_users2(self, user_id, match_percentage): + # + # result = [] + # matched_users_list = list(self.db[const.table2_preferencesBasedMatchedProfiles].find({})) + # + # for i in matched_users_list: + # if i['user_id'] == user_id: + # matched_users_list = i['matched_users'] + # + # + # for i in matched_users_list: + # if i["match_percentage"] >= match_percentage: + # result.append(i) + # + # result = sorted(result, key=lambda k: k["match_percentage"], reverse=True) + # + # return result + + + + +# ------------------------------------------------- MONGODB SEARCH FOR RECOMMENDATIONS------------------------------------------------ + + # ------------------------------------------- Searching profile with given 'user_id' from the 'users' collection. --------------------- + + def find_user_data(self, user_id): + ''' + Searching user's data with given 'user_id' from the 'users' collection. + :param user_id: + :return: user's details + ''' + + try: + user_data = list(self.db['users'].find({"user_id" : user_id})) + + return user_data[0] + + except Exception as e: + utils.logger.exception("__Error while searching user details from 'user' collection in MongoDB__" + str(e)) + + + # ------------------------------------------- Searching profiles with right query from table_1 --------------------- + def find_profiles_having_similar_interests_spoken_languages(self, user_data): + ''' + Searching profile with given user_id from the 'users' collection. 
+ :param user_data: + :return: profile details + ''' + + # if user_data["aInterest"] == ['']: + # user_interests_list = user_data["aInterest"] + # else: + # user_interests_list = [x["interest"] for x in user_data["aInterest"]] + # + # if user_data["aLanguage"] == ['']: + # user_languages_list = user_data["aLanguage"] + # else: + # user_languages_list = [x["language"] for x in user_data["aLanguage"]] + + + user_interests_list = "" + user_languages_list = "" + + if "aInterest" in user_data.keys(): + user_interests_list = [x for x in user_data["aInterest"]] + + if "aLanguage" in user_data.keys(): + user_languages_list = [x for x in user_data["aLanguage"]] + + + + # user_coordinates = [x for x in user_data["geometry"]["coordinates"]] + + + try: + # db["mydb"].find( + # {"$and": [ + # {"field": var1}, + # {"field": { + # "$ne": var2 + # }} + # ]} + # ) + + + # ll = list(self.db["users"].find( + # {"$and": [ + # {"user_id": {"$ne": user_data["user_id"]}}, + # {"gender": user_data["gender"]} + # ] + # } + # )) + + + similar_interests_users_list = list(self.db['users'].find( + {"$and": + [ + {"$and": + [ + {"user_id": {"$ne": user_data["user_id"]}}, + {"gender": user_data["gender"]} + ] + }, + + { + "$or": + [ + {"aInterest": + {"$elemMatch": {"$in": user_interests_list}} + }, + {"aLanguage": + {"$elemMatch": {"$in": user_languages_list}} + }, + { + "body_type": user_data["body_type"] + }, + { + "profession": user_data["profession"] + }, + { + "sex_orientation": user_data["sex_orientation"] + }, + { + "location": user_data["location"] + }, + + ] + } + ] + } + ) + ) + + pprint(similar_interests_users_list) + return similar_interests_users_list + + except Exception as e: + utils.logger.exception("__Error while searching user details from 'user' collection in MongoDB__" + str(e)) + + + + + + # -------------------------------------------Finding recommended profiles to be displayed on discovery page for a user --------------------- + def 
mongodb_search_to_recommend_profiles_for_discovery_page(self, user_data): + ''' + Searching profile with given user_id from the 'users' collection. + :param user_data: + :return: profile details + ''' + + user_interests_list = "" + user_languages_list = "" + + if "aInterest" in user_data.keys(): + user_interests_list = [x for x in user_data["aInterest"]] + + if "aLanguage" in user_data.keys(): + user_languages_list = [x for x in user_data["aLanguage"]] + + + # user_coordinates = [x for x in user_data["geometry"]["coordinates"]] + + + try: + # db["mydb"].find( + # {"$and": [ + # {"field": var1}, + # {"field": { + # "$ne": var2 + # }} + # ]} + # ) + + gender = '' + if user_data["gender"] == "Male": + gender = "Female" + elif user_data["gender"] == "Female": + gender = "Male" + + + # ll = list(self.db["users"].find( + # {"$and": [ + # {"user_id": {"$ne": user_data["user_id"]}}, + # {"gender": gender} + # ] + # } + # )) + + + # similar_interests_but_opposite_gender_users_list = list(self.db['users'].find( + # {"$and": + # [ + # {"$and": + # [ + # {"user_id": {"$ne": user_data["user_id"]}}, + # {"gender": gender} + # ] + # }, + # + # { + # "$or": + # [ + # {"aInterest": + # {"$elemMatch": {"interest": {"$in": user_interests_list}}} + # }, + # {"aLanguage": + # {"$elemMatch": {"language": {"$in": user_languages_list}}} + # }, + # ] + # } + # ] + # } + # ) + # ) + + + similar_interests_but_opposite_gender_users_list = list(self.db['users'].find( + {"$and": + [ + {"$and": + [ + {"user_id": {"$ne": user_data["user_id"]}}, + {"gender": gender}, + # {"sex_orientation": user_data["sex_orientation"]} + ] + }, + + { + "$or": + [ + {"aInterest": + {"$elemMatch": {"$in": user_interests_list}} + }, + {"aLanguage": + {"$elemMatch": {"$in": user_languages_list}} + }, + { + "body_type": user_data["body_type"] + }, + { + "profession": user_data["profession"] + }, + { + "sex_orientation": user_data["sex_orientation"] + }, + { + "location": user_data["location"] + }, + + ] + } + ] + } + 
) + ) + + pprint(similar_interests_but_opposite_gender_users_list) + return similar_interests_but_opposite_gender_users_list + + except Exception as e: + utils.logger.exception("__Error while searching user details from 'user' collection in MongoDB__" + str(e)) + + + + + # ------------------------------------------- Searching profile with given user_id from the 'friend_statuses' collection. --------------------- + + def discover_new_friends(self, user_id): + ''' + Finding those profiles who have not yet discovered by the users. + The use has taken some actions against other profile. Requirement is to find those users who are not yet discovered. + :param user_data: + :return: profile details + ''' + + try: + # mongo_query_interest = { + # 'account_type': 'exfactor', + # 'interest': interest, + # + # } + lst_friends_statuses = list(self.db["friend_statuses"].find({"from_user_id": user_id})) + + discovered_friends = [] + + for i in lst_friends_statuses: + discovered_friends.append(i["to_user_id"]) + + + return discovered_friends + + except Exception as e: + utils.logger.exception("__Error while searching user details from 'user' collection in MongoDB__" + str(e)) diff --git a/Scripts/Utility/Log/__pycache__/logger.cpython-36.pyc b/Scripts/Utility/Log/__pycache__/logger.cpython-36.pyc new file mode 100644 index 00000000..a8b63aaa Binary files /dev/null and b/Scripts/Utility/Log/__pycache__/logger.cpython-36.pyc differ diff --git a/Scripts/Utility/Log/__pycache__/logger.cpython-37.pyc b/Scripts/Utility/Log/__pycache__/logger.cpython-37.pyc new file mode 100644 index 00000000..0759a705 Binary files /dev/null and b/Scripts/Utility/Log/__pycache__/logger.cpython-37.pyc differ diff --git a/Scripts/Utility/Log/logger.py b/Scripts/Utility/Log/logger.py new file mode 100644 index 00000000..628bb2ac --- /dev/null +++ b/Scripts/Utility/Log/logger.py @@ -0,0 +1,40 @@ +""" +Logger Utility +""" + +import logging.handlers +import os +import time +import datetime + +class Logger: + 
""" + Contain method for logger class + """ + def __init__(self, config): + """ + + :param config: + """ + print("3. Log>>logger.py file") + ts = time.time() + + service_name = config["service_name"] + log_dir_path = config["path"]["log_path"] + + if not os.path.isdir(log_dir_path): + os.makedirs(log_dir_path) + + log_file = os.path.join(log_dir_path, service_name) + + formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%Y-%m-%d %H:%M%S") + + logHandler = logging.handlers.TimedRotatingFileHandler(filename=str(log_file), backupCount=2, when="M", interval=600) + logHandler.setLevel(config["log_level"]) + logHandler.setFormatter(formatter) + + # Instantiating the "logging.getLogger()" class ... + self.log_obj = logging.getLogger(service_name) + self.log_obj.addHandler(logHandler) + self.log_obj.setLevel(config["log_level"]) + self.log_obj.debug("Logger Initialized") \ No newline at end of file diff --git a/Scripts/Utility/__pycache__/utils.cpython-36.pyc b/Scripts/Utility/__pycache__/utils.cpython-36.pyc new file mode 100644 index 00000000..7c6ca070 Binary files /dev/null and b/Scripts/Utility/__pycache__/utils.cpython-36.pyc differ diff --git a/Scripts/Utility/__pycache__/utils.cpython-37.pyc b/Scripts/Utility/__pycache__/utils.cpython-37.pyc new file mode 100644 index 00000000..0f9423d3 Binary files /dev/null and b/Scripts/Utility/__pycache__/utils.cpython-37.pyc differ diff --git a/Scripts/Utility/utils.py b/Scripts/Utility/utils.py new file mode 100644 index 00000000..0336ae87 --- /dev/null +++ b/Scripts/Utility/utils.py @@ -0,0 +1,10 @@ +import os +import __root__ + +from Configuration import configreader +from Scripts.Utility.Log.logger import Logger + +# Read the configurations +config_file = os.path.join(__root__.path(), "Configuration", "service.yml") +configuration = configreader.read_configuration(config_file) # parse the 'service.yml' file +logger = Logger(configuration).log_obj \ No newline at end of file diff --git 
"""Flask blueprint for the suspect face-matching web application.

Routes:
    ``/`` and ``/index`` -- purge previously uploaded images and render home.
    ``/upload``          -- accept an uploaded image and stage it in static/img.
    ``/suspect_faces``   -- compare the staged test image against the suspect
                            repository and return the top-N matches as JSON.
"""

import json
import os
import shutil
from collections import OrderedDict

import flask
import numpy as np
from flask import Blueprint, current_app, request

from PIL import Image
# import pillow_avif

import __root__
from Constants import const
from Scripts.Services.ImageMatchingServices.main_image_matching_service import Main
from Scripts.Services.AwsConnection.aws_s3_bucket_conn import AWS_S3BucketConnection
from Scripts.Services.FaceRecognitionLibrary.face_recognition_distance import FaceRecognition
from Scripts.Services.MongoConnection.mongo_connection import MongoConn
from Scripts.Utility import utils

# Canned JSON bodies for error responses.
exception_message = '{"status":False, "status":"Server error, please contact your administrator"}'
method_error_message = '{"status": False, "message": "Method not supported!"}'

app_main = Blueprint("app_main", __name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))

server = "testing"
# server = "production"


@app_main.route("/index")
@app_main.route("/")
def index():
    """Render the home page after purging previously uploaded images."""
    target_folder = os.path.join(__root__.path(), "static/img")
    for root, dirs, files in os.walk(target_folder):
        for f in files:
            file_path = os.path.join(root, f)
            try:
                if os.path.isfile(file_path) or os.path.islink(file_path):
                    os.unlink(file_path)
            except OSError as e:
                # Best-effort cleanup: report and keep going.
                print("Failed to delete % s. Reason: %s" % (file_path, str(e)))

    return flask.render_template("home.html", file_path="img/image_here.jpg")


@app_main.route("/upload", methods=["POST"])
def upload():
    """Save the uploaded image as static/img/<img_now> plus a pristine copy."""
    target = os.path.join(APP_ROOT, "static/img")
    os.makedirs(target, exist_ok=True)

    staged = "static/img/" + const.img_now
    for file in request.files.getlist("file"):
        # Case-insensitive extension check (original rejected e.g. ".JPG").
        name = file.filename.lower()
        if name.endswith(tuple(current_app.config["ALLOWED_EXTENSION"])):
            file.save(staged)
        elif name.endswith("avif"):
            # Pillow (with the avif plugin enabled) transcodes AVIF on save.
            img = Image.open(file)
            img.save(staged)

    # Only snapshot the "normal" copy if an accepted file was actually saved;
    # previously a rejected upload crashed here with FileNotFoundError.
    if os.path.isfile(staged):
        shutil.copyfile(staged, "static/img/" + const.img_normal)

    return flask.render_template("upload.html", file_path="img/img_now.jpg")


@app_main.route("/suspect_faces", methods=["GET", "POST"])
def suspect_faces():
    """Return the top-N suspects most similar to the staged test image.

    Expects a POST form field ``top_matched_numbers``. Responds with a JSON
    string mapping suspect image basenames to their comparison result
    (``is_comparable`` flag and ``face_dist`` distance), closest first.
    """
    if request.method != "POST":
        # Previously a GET fell through and returned None (HTTP 500).
        return method_error_message

    try:
        top_values = request.form.getlist("top_matched_numbers")
        if not top_values:
            # Seen in production logs: a missing field raised IndexError.
            return '{"status": False, "message": "top_matched_numbers is required"}'
        top_matched_numbers = int(top_values[0])

        # Collect the (single) staged test image and all reference images.
        test_suspect_image_file = []
        for root, subdirs, files in os.walk(const.suspect_test_image):
            for file in files:
                test_suspect_image_file.append(os.path.join(root, file))

        all_suspect_images = []
        for root, subdirs, files in os.walk(const.suspects_images_repo):
            for file in files:
                all_suspect_images.append(os.path.join(root, file))

        compare_faces_obj = FaceRecognition()

        # Ordered so encodings stay aligned with the names/encoding lists.
        ref_img_encodings = OrderedDict()
        list_ref_img_encodings = []
        list_ref_img_names = []

        for ref_img in all_suspect_images:
            if os.path.isfile(ref_img):
                img_encoding = compare_faces_obj.face_enconding(ref_img)
                # face_enconding returns a non-array sentinel when no face found.
                if isinstance(img_encoding, np.ndarray):
                    base = os.path.basename(ref_img)
                    ref_img_encodings[base] = img_encoding
                    list_ref_img_names.append(base)
                    list_ref_img_encodings.append(img_encoding)

        if not test_suspect_image_file or not os.path.isfile(test_suspect_image_file[0]):
            # Previously this path left the comparison results undefined
            # and crashed with NameError further down.
            return '{"status": False, "message": "No test image found"}'

        test_img_encoding = compare_faces_obj.face_enconding(test_suspect_image_file[0])

        is_comparable_list, face_dist_list = compare_faces_obj.face_comparison_encoding(
            list_ref_img_encoding=list_ref_img_encodings,
            test_img_encoding=test_img_encoding,
        )

        response = {}
        for name, comparable, dist in zip(ref_img_encodings.keys(), is_comparable_list, face_dist_list):
            response[name] = {"is_comparable": comparable, "face_dist": dist}

        # Smallest face distance first, truncated to the requested count.
        ranked = sorted(response.items(), key=lambda kv: kv[1]["face_dist"])
        final_response = dict(ranked[:top_matched_numbers])

        # NOTE: kept the original (double-encoded) payload shape for
        # backward compatibility with existing consumers.
        return json.dumps(str(final_response))

    except Exception as e:
        utils.logger.exception("__Error__" + str(e))
        # Previously the handler returned None, surfacing a raw 500.
        return exception_message
import os


def path():
    """Return the absolute path of the project root (this file's directory).

    Wraps ``__file__`` in ``os.path.abspath`` before taking ``dirname``:
    plain ``os.path.dirname(__file__)`` returns the empty string when the
    module was loaded via a bare relative path, which silently broke
    ``os.path.join(__root__.path(), ...)`` callers.

    :return: the root path (absolute)
    """
    return os.path.dirname(os.path.abspath(__file__))
DEBUG Logger Initialized +2023-02-05 09:4701 DEBUG Logger Initialized +2023-02-05 10:3641 DEBUG Logger Initialized +2023-02-05 10:3649 DEBUG Logger Initialized +2023-02-05 10:3820 DEBUG Logger Initialized +2023-02-05 10:3936 DEBUG Logger Initialized +2023-02-05 10:4124 DEBUG Logger Initialized +2023-02-05 10:4247 DEBUG Logger Initialized +2023-02-05 10:4722 DEBUG Logger Initialized +2023-02-05 10:4917 DEBUG Logger Initialized +2023-02-05 10:5105 DEBUG Logger Initialized +2023-02-05 10:5331 DEBUG Logger Initialized +2023-02-05 10:5424 DEBUG Logger Initialized +2023-02-05 11:1834 DEBUG Logger Initialized +2023-02-05 11:1911 DEBUG Logger Initialized +2023-02-05 11:2048 DEBUG Logger Initialized +2023-02-05 11:2107 ERROR __Error__'NoneType' object is not subscriptable +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 92, in suspect_faces + user_id = user_json_data["user_id"] +TypeError: 'NoneType' object is not subscriptable +2023-02-05 11:2251 DEBUG Logger Initialized +2023-02-05 11:2843 DEBUG Logger Initialized +2023-02-05 11:2847 DEBUG Logger Initialized +2023-02-05 11:3109 DEBUG Logger Initialized +2023-02-05 11:4151 DEBUG Logger Initialized +2023-02-05 11:5342 DEBUG Logger Initialized +2023-02-05 11:5940 DEBUG Logger Initialized +2023-02-05 12:0103 ERROR __Error__result.html +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 139, in suspect_faces + return render_template("result.html", result=final_response) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 138, in render_template + ctx.app.jinja_env.get_or_select_template(template_name_or_list), + File 
"/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 1068, in get_or_select_template + return self.get_template(template_name_or_list, parent, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 997, in get_template + return self._load_template(name, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 958, in _load_template + template = self.loader.load(self, name, self.make_globals(globals)) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/loaders.py", line 125, in load + source, filename, uptodate = self.get_source(environment, name) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 60, in get_source + return self._get_source_fast(environment, template) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 89, in _get_source_fast + raise TemplateNotFound(template) +jinja2.exceptions.TemplateNotFound: result.html +2023-02-05 12:0351 DEBUG Logger Initialized +2023-02-05 12:0359 ERROR __Error__list index out of range +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 91, in suspect_faces + top_matched_numbers = request.form.getlist('top_matched_numbers')[0] +IndexError: list index out of range +2023-02-05 12:0912 DEBUG Logger Initialized +2023-02-05 12:0944 ERROR __Error__result.html +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 139, in suspect_faces + return render_template("result.html", result=final_response) + File 
"/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 138, in render_template + ctx.app.jinja_env.get_or_select_template(template_name_or_list), + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 1068, in get_or_select_template + return self.get_template(template_name_or_list, parent, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 997, in get_template + return self._load_template(name, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 958, in _load_template + template = self.loader.load(self, name, self.make_globals(globals)) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/loaders.py", line 125, in load + source, filename, uptodate = self.get_source(environment, name) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 60, in get_source + return self._get_source_fast(environment, template) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 89, in _get_source_fast + raise TemplateNotFound(template) +jinja2.exceptions.TemplateNotFound: result.html +2023-02-05 12:1041 DEBUG Logger Initialized +2023-02-05 12:1110 ERROR __Error__result.html +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 139, in suspect_faces + return render_template("result.html", result=final_response) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 138, in render_template + ctx.app.jinja_env.get_or_select_template(template_name_or_list), + File 
"/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 1068, in get_or_select_template + return self.get_template(template_name_or_list, parent, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 997, in get_template + return self._load_template(name, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 958, in _load_template + template = self.loader.load(self, name, self.make_globals(globals)) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/loaders.py", line 125, in load + source, filename, uptodate = self.get_source(environment, name) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 60, in get_source + return self._get_source_fast(environment, template) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 89, in _get_source_fast + raise TemplateNotFound(template) +jinja2.exceptions.TemplateNotFound: result.html +2023-02-05 12:1139 DEBUG Logger Initialized +2023-02-05 12:1631 DEBUG Logger Initialized +2023-02-05 12:1958 DEBUG Logger Initialized +2023-02-05 12:2210 DEBUG Logger Initialized +2023-02-05 12:2251 ERROR __Error__Encountered unknown tag 'endfor'. 
+Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 139, in suspect_faces + return render_template("matched.html", result=final_response) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 138, in render_template + ctx.app.jinja_env.get_or_select_template(template_name_or_list), + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 1068, in get_or_select_template + return self.get_template(template_name_or_list, parent, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 997, in get_template + return self._load_template(name, globals) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 958, in _load_template + template = self.loader.load(self, name, self.make_globals(globals)) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/loaders.py", line 137, in load + code = environment.compile(source, name, filename) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 757, in compile + self.handle_exception(source=source_hint) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 925, in handle_exception + raise rewrite_traceback_stack(source=source) + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/templates/matched.html", line 28, in template + {% endfor %} +jinja2.exceptions.TemplateSyntaxError: Encountered unknown tag 'endfor'. 
+2023-02-05 12:2656 DEBUG Logger Initialized +2023-02-05 12:2756 DEBUG Logger Initialized +2023-02-05 12:2828 ERROR __Error__too many values to unpack (expected 2) +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 139, in suspect_faces + return render_template("matched.html", result=final_response) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 140, in render_template + ctx.app, + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 120, in _render + rv = template.render(context) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 1291, in render + self.environment.handle_exception() + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 925, in handle_exception + raise rewrite_traceback_stack(source=source) + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/templates/matched.html", line 15, in top-level template code + {% for key, values in result %} +ValueError: too many values to unpack (expected 2) +2023-02-05 12:2957 DEBUG Logger Initialized +2023-02-05 12:3027 ERROR __Error__too many values to unpack (expected 2) +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 139, in suspect_faces + return render_template("matched.html", result=final_response) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 140, in render_template + ctx.app, + File 
"/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/flask/templating.py", line 120, in _render + rv = template.render(context) + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 1291, in render + self.environment.handle_exception() + File "/home/amit/anaconda3/envs/venv_xfactor_image_matching/lib/python3.7/site-packages/jinja2/environment.py", line 925, in handle_exception + raise rewrite_traceback_stack(source=source) + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/templates/matched.html", line 15, in top-level template code + +ValueError: too many values to unpack (expected 2) +2023-02-05 12:5103 DEBUG Logger Initialized +2023-02-05 12:5132 ERROR __Error__too many values to unpack (expected 2) +Traceback (most recent call last): + File "/home/amit/afghani_root_folder/ElintData/PoliceHackathon/ImageMatching/police_hackathon_suspect_image_match/Scripts/xfactor_main.py", line 138, in suspect_faces + for k, v in final_response: +ValueError: too many values to unpack (expected 2) diff --git a/run_flasked_api.py b/run_flasked_api.py new file mode 100644 index 00000000..161b2446 --- /dev/null +++ b/run_flasked_api.py @@ -0,0 +1,33 @@ +""" +Import all necessary packages +""" + +# import API level modules +from flask import Flask +import flask_compress +from flask_cors import CORS +import __root__ + +# import project files +from Scripts.xfactor_main import app_main +from Scripts.Utility import utils + + + +app_wsgi = Flask(__name__) + +# Compressing all response: +flask_compress.Compress(app_wsgi) + +# Connecting all services via blueprint +app_wsgi.register_blueprint(app_main) + +CORS(app_wsgi, resources={r'/*': {'origin': '*'}}) +# Setting configs +app_wsgi.config["UPLOAD_FOLDER"] = __root__.path() + "/static/img" +app_wsgi.config["MAX_CONTENT_LENGTH"] = 5*1024*1024 # 5 MB 
+app_wsgi.config["ALLOWED_EXTENSION"] = [".jpg", ".jpeg", ".png", ".gif"] +# Declaring Apps: + +if __name__ == "__main__": + app_wsgi.run(host=utils.configuration["settings"]["ip"], port=utils.configuration["settings"]["port"], debug=True, threaded=True, use_reloader=False) \ No newline at end of file diff --git a/running_command.txt b/running_command.txt new file mode 100644 index 00000000..c8c47f93 --- /dev/null +++ b/running_command.txt @@ -0,0 +1,44 @@ +sudo ufw allow 4007 +cd ~/xfactor_image_match_microservices_v2 +gunicorn run_flasked_api:app_wsgi --preload -b 0.0.0.0:4007 --daemon + +#----------------------------------------------------------------------------------------------------------------------- +sudo vim /etc/systemd/system/xfactor_image_match_microservices_v2.service + +Let’s put a description of our service here and tell the init system to only start this after the networking target has been reached: + [Unit] + Description=Gunicorn instance to serve xfactor_image_match_microservices_v2 + After=network.target + +Next, let’s open up the [Service] section. This will specify the user and group that we want the process to run under. +Let’s give our regular user account ownership of the process since it owns all of the relevant files. +Let’s also give group ownership to the www-data group so that Nginx can communicate easily with the Gunicorn processes. 
+Remember to replace the username here with your username: + +[Unit] +Description=Gunicorn instance to serve xfactor_image_match_microservices_v2 +After=network.target + +[Service] +User=amitparashar +Group=www-data +WorkingDirectory=/home/sammy/myproject +Environment="PATH=/home/sammy/myproject/myprojectenv/bin" +ExecStart=/home/sammy/myproject/myprojectenv/bin/gunicorn --workers 3 --bind unix:myproject.sock -m 007 wsgi:app + +[Install] +WantedBy=multi-user.target + + + + +[Unit] +Description=Gunicorn instance to serve xfactor_image_match_microservices_v2 +After=network.target + +[Service] +User=ubuntu +Group=www-data +WorkingDirectory=/home/ubuntu/projects/xfactor_image_match_microservices_v2 +Environment="PATH=/home/ubuntu/venv_dir/venv_image_match_v1/bin" +ExecStart=/home/ubuntu/venv_dir/venv_image_match_v1/bin/gunicorn --workers 3 --bind unix:xfactor_image_match_microservices_v2.sock -m 007 run_flasked_api:app_wsgi --preload -b 0.0.0.0:4007 \ No newline at end of file diff --git a/static/css/style.css b/static/css/style.css new file mode 100644 index 00000000..82e7bc98 --- /dev/null +++ b/static/css/style.css @@ -0,0 +1,82 @@ +body { + margin: 0; + padding: 0; + font-family: "Helvetica Neue", Helvetica, Arial, sans-serif; + color: #444; +} +/* + * Formatting the header area + */ +header { + background-color: #DFB887; + height: 35px; + width: 100%; + opacity: .9; + margin-bottom: 10px; +} +header h1.logo { + margin: 0; + font-size: 1.7em; + color: #fff; + text-transform: uppercase; + float: left; +} +header h1.logo:hover { + color: #fff; + text-decoration: none; +} +/* + * Centering the body content + */ +.container { + width: 1200px; + margin: 0 auto; +} +div.home { + padding: 10px 0 30px 0; + background-color: #E6E6FA; + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; +} +div.about { + padding: 10px 0 30px 0; + background-color: #E6E6FA; + -webkit-border-radius: 6px; + -moz-border-radius: 6px; + border-radius: 6px; +} +h2 { + 
font-size: 3em; + margin-top: 40px; + text-align: center; + letter-spacing: -2px; +} +h3 { + font-size: 1.7em; + font-weight: 100; + margin-top: 30px; + text-align: center; + letter-spacing: -1px; + color: #999; +} +.menu { + float: right; + margin-top: 8px; +} +.menu li { + display: inline; +} +.menu li + li { + margin-left: 35px; +} +.menu li a { + color: #444; + text-decoration: none; +} + +table +{ +margin-left: auto; +margin-right: auto; +} \ No newline at end of file diff --git a/static/img/img_normal.jpg b/static/img/img_normal.jpg new file mode 100644 index 00000000..a95adac4 Binary files /dev/null and b/static/img/img_normal.jpg differ diff --git a/static/img/img_now.jpg b/static/img/img_now.jpg new file mode 100644 index 00000000..a95adac4 Binary files /dev/null and b/static/img/img_now.jpg differ diff --git a/static/logo.png b/static/logo.png new file mode 100644 index 00000000..57aee033 Binary files /dev/null and b/static/logo.png differ diff --git a/templates/about.html b/templates/about.html new file mode 100644 index 00000000..5add4c5d --- /dev/null +++ b/templates/about.html @@ -0,0 +1,16 @@ + + + + Font Awesome Icons + + + + + +

afghani.iitkgp

+ + + + + + \ No newline at end of file diff --git a/templates/home.html b/templates/home.html new file mode 100644 index 00000000..1a819b97 --- /dev/null +++ b/templates/home.html @@ -0,0 +1,13 @@ +{% extends "layout.html" %} {% block body %} +
+
+

Suspect Face Detection Application

+

Webapp for image processing for Karnataka Police Dept.

+
+ Browse an image... + + +
+
+
+{% endblock %} \ No newline at end of file diff --git a/templates/includes/_navbar.html b/templates/includes/_navbar.html new file mode 100644 index 00000000..3ec9e6f7 --- /dev/null +++ b/templates/includes/_navbar.html @@ -0,0 +1,17 @@ + \ No newline at end of file diff --git a/templates/layout.html b/templates/layout.html new file mode 100644 index 00000000..2e076832 --- /dev/null +++ b/templates/layout.html @@ -0,0 +1,19 @@ + + + + + + + + Digital Image Processing + + + + + {% include "includes/_navbar.html" %} +
+ {% block body %} {% endblock %} +
+ + + \ No newline at end of file diff --git a/templates/matched.html b/templates/matched.html new file mode 100644 index 00000000..6d17c539 --- /dev/null +++ b/templates/matched.html @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + +{% for key1, values1 in final_response.items() %} + {% for key2, values2 in values1.items() %} + + + + + + + + {% for row_header in row_headers %} + + {% endfor %} + + + {% for key2, values2 in values.items() %} + + + {% for value in values2.values() %} + + {% endfor %} + + {% endfor %} + {% endfor %} + + + + +
SuspectIDface_similarityis_comparable
key1values1
Name2Description2
Name3Description3
{{ key }}
{{ row_header }}
{{ key2 }} + {{ value }} +
Name1Description1
Name2Description2
Name3Description3
+ + {% for key, values in result %} + + + + {% for row_header in row_headers %} + + {% endfor %} + + + {% for key2, values2 in values.items() %} + + + {% for value in values2.values() %} + + {% endfor %} + + {% endfor %} + {% endfor %} +
{{ key }}
{{ row_header }}
{{ key2 }} + {{ value }} +
+ diff --git a/templates/upload.html b/templates/upload.html new file mode 100644 index 00000000..8d86b211 --- /dev/null +++ b/templates/upload.html @@ -0,0 +1,39 @@ +{% extends "layout.html" %} {% block body %} +
+

Menu

+
+
+
+

Click Options

+
+
+
+ +
+
+
+
+

Number of suspects

+ +
+
+
+
+
+
+
+
+
+
+
+

After

+ image here +
+
+
+
+
+
+ +{% endblock %} \ No newline at end of file