#!/usr/bin/env python
import os
import cv2
import torch
from transformers import AutoImageProcessor, ResNetForImageClassification

# Load the pretrained ResNet-50 classifier and its matching preprocessor
# (weights are downloaded from the Hugging Face hub on first run).
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")

# Run relative to this file's directory so the reference images
# ('image_a.jpg' / 'image_b.jpg' read below) resolve no matter where the
# script is launched from.
script_dir = os.path.dirname(os.path.abspath(__file__))
os.chdir(script_dir)
def img2embed(image):
    """Return the ResNet-50 logit vector for *image* as a 1-D tensor.

    *image* is an array-like picture (the callers pass OpenCV BGR arrays);
    the module-level ``processor`` handles resizing/normalisation and the
    module-level ``model`` produces the logits used as an embedding.
    """
    batch = processor(image, return_tensors="pt")
    with torch.no_grad():  # inference only — no gradients needed
        logits = model(**batch).logits
    return logits[0]
def _load_image(path):
    """Read *path* with OpenCV, failing loudly instead of returning None.

    ``cv2.imread`` silently returns ``None`` for a missing or unreadable
    file, which previously surfaced only as an opaque error inside the
    image processor at startup.
    """
    img = cv2.imread(path)
    if img is None:
        raise FileNotFoundError(f"could not read reference image: {path}")
    return img


# Reference embeddings, computed once at startup; all uploads are compared
# against these two vectors.
embed_a = img2embed(_load_image('image_a.jpg'))
embed_b = img2embed(_load_image('image_b.jpg'))
def a_or_b(image):
    """Return True if *image* is closer to reference B, else False.

    Embeds *image* and compares Euclidean distances to the two reference
    embeddings. Ties count as A (False), so the function always returns a
    bool — previously an exact tie fell through both branches and returned
    ``None``, which the /compare route would serialize as JSON ``null``.
    """
    embed_c = img2embed(image)
    dist_a = torch.norm(embed_c - embed_a)
    dist_b = torch.norm(embed_c - embed_b)
    # True -> closer to image_b; False -> closer to (or tied with) image_a.
    return bool(dist_a > dist_b)
from PIL import Image
from flask import Flask, request, jsonify, render_template
import numpy as np
# Flask application serving the upload form ('/') and the comparison
# endpoint ('/compare').
app = Flask(__name__)
@app.route('/')
def upload_form():
    """Serve the HTML upload form at the site root."""
    page = render_template('upload.html')
    return page
import logging
from flask import Flask, request, jsonify
from PIL import Image
import cv2
import numpy as np
import time
import datetime
# Configure general logging: INFO and above goes to both app.log and stderr.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s',
    handlers=[
        logging.FileHandler("app.log"),
        logging.StreamHandler(),
    ],
)

# Configure machine learning logging: a dedicated, message-only CSV file
# (rows of "unixtime,fraction_of_day,result" written by /compare).
ml_logger = logging.getLogger('ml_logger')
ml_logger.setLevel(logging.INFO)
ml_handler = logging.FileHandler('ml_log.csv')
ml_handler.setFormatter(logging.Formatter('%(message)s'))
ml_logger.addHandler(ml_handler)
# Without this, every CSV row would also propagate up to the root logger
# and be duplicated into app.log/stderr with the general log format.
ml_logger.propagate = False
##@app.route('/compare', methods=['POST'])
##def compare_images():
## if request.method == 'POST':
## #image = cv2.imread(request.files['image'])
## image_file = request.files['image']
## image = Image.open(image_file)
## image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
## result = a_or_b(image)
## return jsonify({'result': result})
@app.route('/compare', methods=['POST'])
def compare_images():
    """Compare an uploaded image against the two reference images.

    Expects a multipart-form upload under the field name 'image'.
    Returns JSON ``{'result': bool}`` — True means closer to image_b,
    False means closer to image_a. A missing upload yields HTTP 400;
    any other failure yields HTTP 500 with the error message.
    """
    # Flask already restricts this route to POST, so the old
    # `if request.method == 'POST'` guard was redundant (and silently
    # returned None otherwise).
    try:
        logging.info("Received POST request on /compare")
        image_file = request.files.get('image')
        if image_file is None:
            # A missing field previously raised inside the broad except
            # below and came back as a 500; a client mistake should be 400.
            return jsonify({'error': "missing 'image' file field"}), 400
        logging.info("Image file received: %s", image_file.filename)
        # PIL decodes the upload; convert RGB -> BGR so it matches the
        # OpenCV-loaded images the reference embeddings were built from.
        image = Image.open(image_file)
        image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
        logging.info("Image converted to OpenCV format")
        result = a_or_b(image)
        logging.info("Comparison result: %s", result)
        # Log one CSV row for machine learning purposes:
        # unixtime, fraction of the day elapsed, and the comparison result.
        now = datetime.datetime.now()
        unixtime = int(now.timestamp())
        midnight = now.replace(hour=0, minute=0, second=0, microsecond=0)
        seconds_since_midnight = (now - midnight).total_seconds()
        percentage_of_day = seconds_since_midnight / 86400  # 86400 s per day
        ml_logger.info(f"{unixtime},{percentage_of_day:.6f},{result}")
        return jsonify({'result': result})
    except Exception as e:
        # Boundary handler: logging.exception records the full traceback,
        # which logging.error("%s", e) was discarding.
        logging.exception("Error processing image")
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # Print readiness BEFORE starting the server: app.run() blocks, so the
    # original second app.run(...) call and the print after it were dead code.
    print('ready')
    # NOTE(review): debug=True on a non-loopback host exposes the Werkzeug
    # interactive debugger (arbitrary code execution) to the LAN — disable
    # debug for anything beyond local development.
    app.run(host='10.0.0.53', debug=True)
#def a_or_b(image):
# embed_c = img2embed(image)
# similarity_a = F.cosine_similarity(embed_c, embed_a, dim=0)
# similarity_b = F.cosine_similarity(embed_c, embed_b, dim=0)
#
# if similarity_a > similarity_b:
# return "Image is closer to image_a"
# elif similarity_b > similarity_a:
# return "Image is closer to image_b"
# else:
# return "Images are equally close"