中国移动 webstorm汉化包 map遍历 智慧树 checkbox sharepoint jQuery Mobile 百度seo关键词优化 在线考试系统代码 bootstrap侧边栏 oracle无效的列索引 matlab对数函数 pyhton中异常和模块 kubernetes安装 python数据类型 mysql查询 python中文文档 python分析 java编程基础 java接口类 java什么是多态 java对象和类 java接口实现 randomjava java中泛型 java基本数据结构 java格式化字符串 java判断是否为空 java字符串函数 远程登录linux rewritebase 打马赛克的软件 python的用途 eml文件阅读器下载 pr缩放 qq免安装版 模拟按键 数组删除指定元素 cad视口旋转 vscode全局搜索
当前位置: 首页 > 学习教程  > 编程语言

实时分析视频中的物理要素,并实时得出视频的共感评分

2020/8/11 19:04:50 文章标签:

下面展示一段内联代码片段:

# -*- coding: utf-8 -*-
import cv2
import pandas as pd
import os
import time
import numpy as np
import pickle
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')

def mkdir(path_3):
    """Create the directory *path_3* (including missing parents) if absent.

    Prints a status message either way; returns None.
    """
    try:
        # EAFP: attempt the creation directly instead of exists()+makedirs(),
        # which is racy between the check and the call (TOCTOU).
        os.makedirs(path_3)
        print("---  new folder...  ---")
        print("---  OK  ---")
    except FileExistsError:
        # Path already exists (directory or file) — same message as before.
        print("---  There is this folder!  ---")
def extract_colors_from_videos(dir):
    """Return per-pixel-row colour features of the FIRST frame of the video at *dir*.

    For each pixel row of the first decodable frame the feature vector is
    [mean gray, mean HSV-H, mean HSV-S, mean LAB-a, mean LAB-b], so the result
    is a list of ``frame_height`` five-element lists of floats.
    Returns None when no frame can be read.

    NOTE(review): the original code returned from inside the read loop, so only
    the first frame is ever analysed; downstream code appears to rely on this
    (one prediction per pixel row of one frame), so that behaviour is kept —
    confirm it is intended before extending to all frames.
    """
    cap = cv2.VideoCapture(dir)
    try:
        ret, frame = cap.read()  # grab the first frame only (see NOTE above)
        if not ret:
            # Empty or unreadable file — mirrors the original fall-through.
            return None
        print("frame_cnt:", 0, "image_size:", len(frame[0]), len(frame))
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        lab_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
        # Only H, S, a and b channels are used; V and L are discarded.
        h, s, _v = cv2.split(hsv_frame)
        _l, alpha, beta = cv2.split(lab_frame)
        # Vectorised row means (axis=1) replace the per-row Python loop;
        # np.mean(row) and row.mean() produce identical float64 values.
        features = np.column_stack((
            gray_frame.mean(axis=1),
            h.mean(axis=1),
            s.mean(axis=1),
            alpha.mean(axis=1),
            beta.mean(axis=1),
        ))
        return features.tolist()
    finally:
        # Fix: the original returned inside the loop and never reached
        # cap.release(), leaking the VideoCapture handle.
        cap.release()

def video(dir, x, y):
    """Play the video at *dir* with a live "empathy" line plot stacked above it.

    For each sample i the plot is extended with (x[i], y[i]), rendered to an
    RGB buffer, converted to BGR, concatenated on top of the next (resized)
    video frame and shown in an OpenCV window.  Returns None.
    """
    fig = plt.figure(figsize=(6.4, 2.4), dpi=100)
    cap = cv2.VideoCapture(dir)
    xs = []
    ys = []
    try:
        for i in range(len(y)):
            # Rebuild the axes each step (cla wipes labels/limits too).
            plt.cla()
            plt.title("empathy")
            plt.ylabel("percent")
            plt.ylim(0.0, 1.0)
            plt.xlabel("time")
            plt.xlim(0.0, len(y))
            xs.append(x[i])
            ys.append(y[i])
            plt.plot(xs, ys, '-r')
            plt.text(x[i], y[i], '%.2f%%' % (y[i] * 100), ha='center', va='bottom', color="r", fontsize=20)
            # Redraw the canvas, then grab its RGB pixels.
            fig.canvas.draw()
            # Fix: np.fromstring on binary data is deprecated (removed in new
            # NumPy); np.frombuffer is the zero-copy replacement.
            img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
            img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
            # Canvas is RGB; OpenCV expects BGR.
            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            ret, frame = cap.read()
            if not ret:
                # Fix: the original crashed in cv2.resize(None, ...) when the
                # video had fewer frames than len(y).
                break
            frame = cv2.resize(frame, (640, 360))
            stacked = np.concatenate((img, frame), axis=0)
            cv2.imshow('Real Time Empathy Evaluation', stacked)
            # waitKey is required for the HighGUI window to refresh.
            cv2.waitKey(1)
    finally:
        # Fix: the original never released the capture.
        cap.release()
def pred(train_clf_knn):
    """Classify each pixel-row feature vector of the video's first frame.

    Loads a pickled sklearn classifier from *train_clf_knn*, scales the
    features extracted by extract_colors_from_videos() and predicts a label
    per row.

    Reads the module-level global ``dir`` as the video path (set in __main__).

    Returns:
        (x, y): x is np.arange(1, N+1); y[i] is the running count of sample
        i's own class divided by N, rounded to 2 decimals via a string
        round-trip (kept byte-for-byte so values match the original).
    """
    Images = extract_colors_from_videos(dir)
    # SECURITY: unpickling executes arbitrary code — only load trusted files.
    # Fix: the original opened the file and never closed it; pickle.load in a
    # context manager replaces read()+pickle.loads() and releases the handle.
    with open(train_clf_knn, 'rb') as f_knn:
        model_knn = pickle.load(f_knn)
    X_test = preprocessing.scale(Images)
    y_pred_knn = model_knn.predict(X_test)
    total = len(y_pred_knn)  # loop invariant, hoisted
    print(total)
    x = np.arange(1, total + 1, 1)
    empathy = 0
    no_empathy = 0
    y = []
    for label in y_pred_knn:
        if str(label) == '1':
            empathy += 1
            ratio = empathy / total
        else:
            no_empathy += 1
            ratio = no_empathy / total
        # 2-decimal string round-trip preserved exactly ('%.2f' truncation).
        y.append(float('%.2f' % ratio))
    return x, y


if __name__ == '__main__':
    # Hard-coded, machine-specific input paths — adjust before running.
    # NOTE: ``dir`` shadows the builtin and is read as a GLOBAL by pred();
    # renaming it here would break that lookup.
    dir = 'E:/ffmpeg-latest-win64-static/2019_11_12_experimental_data/test_video/video/47.mp4'
    # Pickled classifier; despite the variable name, the file is an SVM pickle
    # ("image_svm.pickle") — presumably interchangeable sklearn models; verify.
    train_clf_knn = 'E:/ffmpeg-latest-win64-static/2019_11_12_experimental_data/GAZE_DATA_backup/7_09_proceess/classifier/pickle/image_svm.pickle'
    x,y = pred(train_clf_knn)
    video(dir,x,y)


本文链接: http://www.dtmao.cc/news_show_100055.shtml

附件下载

相关教程

    暂无相关的数据...

共有条评论 网友评论

验证码: 看不清楚?