
How to Use AI Facial Recognition API Interfaces in Java, Python, and Go Programs
Here is a simple example to help you get started:
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing import image

# Load a pre-trained emotion recognition model trained on FER2013.
# (This assumes you already have a trained model file, e.g. 'fer2013_mini.h5',
# on disk; Keras does not ship such a model out of the box.)
model = load_model('fer2013_mini.h5')

# Load the image as a 48x48 grayscale array, matching the model's expected input
img_path = 'path_to_your_image.jpg'
img = image.load_img(img_path, color_mode='grayscale', target_size=(48, 48))
x = image.img_to_array(img) / 255.0   # scale pixel values to [0, 1]
x = np.expand_dims(x, axis=0)         # add the batch dimension

# Predict the emotion for the whole image
preds = model.predict(x)
print('Predicted emotion:', np.argmax(preds[0]))

# Load the Haar cascade face detector shipped with OpenCV
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# Read the image and convert it to grayscale for face detection
img = cv2.imread(img_path)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Detect faces
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    # Draw a bounding box around each detected face
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    # Crop the face region and resize it to the model's 48x48 input
    roi_gray = gray[y:y + h, x:x + w]
    roi = cv2.resize(roi_gray, (48, 48)).astype('float32') / 255.0
    roi = np.expand_dims(roi, axis=-1)   # add the channel dimension
    roi = np.expand_dims(roi, axis=0)    # add the batch dimension
    # Predict the emotion for this face and label the bounding box
    preds = model.predict(roi)
    cv2.putText(img, 'Emotion: ' + str(np.argmax(preds[0])), (x, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)

# Display the annotated image
cv2.imshow('Emotion Recognition', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
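The code above runs the model locally. In practice, an "AI facial recognition API interface" as in the title usually means an HTTP call to a hosted service instead. The following is a minimal sketch of such a call using Python's requests library; the endpoint URL, the API key, the request body, and the response fields are all hypothetical placeholders, so replace them with whatever the API you subscribe to actually documents.

import base64
import requests

# Hypothetical endpoint and credentials -- replace with the real values
# from the commercial emotion-recognition API you subscribe to.
API_URL = 'https://api.example.com/v1/emotion/detect'
API_KEY = 'your_api_key'

def detect_emotion(image_path):
    # Many HTTP emotion APIs accept a base64-encoded image in a JSON body
    with open(image_path, 'rb') as f:
        img_b64 = base64.b64encode(f.read()).decode('utf-8')

    resp = requests.post(
        API_URL,
        headers={'Authorization': 'Bearer ' + API_KEY},
        json={'image': img_b64},
        timeout=10,
    )
    resp.raise_for_status()
    # Typically a JSON document describing detected faces and emotion scores
    return resp.json()

if __name__ == '__main__':
    print(detect_emotion('path_to_your_image.jpg'))

Whether the image is sent as base64, a multipart file upload, or an image URL varies by provider, as do authentication and the response schema, so always follow the provider's own documentation.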
This example is intended for learning only and has no real commercial application value. Developers are encouraged to search for 'emotion recognition' (情緒識別) on the 冪簡集成 API Hub and use those mature commercial APIs directly, for example: