This post ties the previous four features together, calling four Baidu APIs: speech recognition, text translation, face detection, and the UNIT chatbot. It simply wraps the four earlier scripts in one Tkinter GUI, so the details of each API call are not repeated here.
The result is shown in the following video:
【python】tkinter界面化+百度API——语音系统
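Before diving into the full program, it may help to confirm that a set of Baidu credentials actually works. The snippet below is a minimal sketch, separate from the program further down; it hits the same OAuth endpoint that the classes use in their get_token()/token() methods, and the key values are placeholders you have to fill in yourself.

import requests

def fetch_access_token(api_key, secret_key):
    # exchange an API Key / Secret Key pair for an access_token
    url = ('https://aip.baidubce.com/oauth/2.0/token'
           '?grant_type=client_credentials'
           '&client_id=' + api_key + '&client_secret=' + secret_key)
    return requests.post(url).json()['access_token']

# print(fetch_access_token('your API Key', 'your Secret Key'))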
The complete code follows.
(The four features were pasted in largely as-is, so several helper methods are duplicated across the classes; I simply didn't bother to refactor them.)
from tkinter import *
from PIL import Image,ImageTk
from aip import AipSpeech
import pyaudio
from scipy import fftpack
import wave
import numpy as np
import requests
import pyttsx3
import matplotlib.pyplot as plt
import base64
import tkinter.filedialog
import random
import cv2
# Main page: a class whose constructor builds the start-screen UI
class StartPage():
def __init__(self):
self.ASR = ASR()
self.Translate = Translate()
self.Face_Detection = Face_Detection()
self.Unit_Robot = Unit_Robot()
        # create the window
        self.root = Tk()
        self.root.resizable(width=False, height=False)  # keep the window size fixed
        self.root.title('语音系统')  # window title
        self.root.geometry('500x800')  # window size
        # open the background image and convert it to a Tk-compatible object
        img = Image.open('1.jpg').resize([500, 800])
        self.img = ImageTk.PhotoImage(img)
        # create a canvas, use the image as its background and cover the whole window
        self.canvas = Canvas(self.root, width=500, height=800)  # canvas width and height
        self.canvas.place(x=0, y=0)
        self.canvas.create_image(250, 400, image=self.img)  # place the image on the canvas (anchored at its centre)
        self.canvas.create_text(250, 100, text='语音系统', font=('宋体', 40))  # title text drawn on the canvas (transparent background)
Button(self.root, width=10, text='语音识别', font=('宋体', 20), fg='white',
command=lambda :self.jumpweb(1), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x = 175,y = 200)
Button(self.root, width = 10,text='语音翻译', font=('宋体', 20), fg='white',
command=lambda :self.jumpweb(2), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x = 175,y = 300)
Button(self.root, width = 10,text='人脸检测', font=('宋体', 20), fg='white',
command=lambda :self.jumpweb(3), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x = 175,y = 400)
Button(self.root, width = 10,text='聊天机器人', font=('宋体', 20), fg='white',
command=lambda :self.jumpweb(4), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x = 175,y = 500)
Button(self.root, width=10, text='退 出', font=('宋体', 20), fg='white',
command=lambda :self.jumpweb(5), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x = 175,y = 600)
self.root.mainloop(0)
def jumpweb(self,N):
if N == 1:
self.root.destroy()
self.ASR.AsrPage()
elif N == 2:
self.root.destroy()
self.Translate.TransPage()
elif N == 3:
self.root.destroy()
self.Face_Detection.FacePage()
elif N == 4:
self.root.destroy()
self.Unit_Robot.UnitPage()
else:
self.root.destroy()
'''-----------------------------------------------------------------------------------------------------------------------------------------------------------'''
# Speech-recognition page: a class whose AsrPage() method builds the UI
class ASR():
def AsrPage(self):
        self.ID = 'your speech-recognition App ID'
        self.Key = 'your speech-recognition API Key'
        self.Secret = 'your speech-recognition Secret Key'
        # create the speech client
        self.client = AipSpeech(self.ID, self.Key, self.Secret)  # speech-recognition client
        # create the window
        self.window = Tk()
        self.window.resizable(width=False, height=False)
        self.window.title('语音识别')  # window title
        self.window.geometry('500x800')  # window size
        # open the background image and convert it to a Tk-compatible object
        img = Image.open('2.jpg').resize([500, 800])
        self.img = ImageTk.PhotoImage(img)
        # create a canvas, use the image as its background and cover the whole window
        self.canvas = Canvas(self.window, width=500, height=800)  # canvas width and height
        self.canvas.place(x=0, y=0)
        self.canvas.create_image(250, 400, image=self.img)  # place the image on the canvas (anchored at its centre)
        self.canvas.create_text(250, 100, text='语音识别', font=('宋体', 40))
        # title label (earlier version, kept for reference)
        # Label(self.window, bg='royalblue', text='语音识别', font=('宋体', 40), fg='white').pack(pady=90)  # vertical padding
        # text box for the recognition result
        self.content = Text(self.window, width=22, height=5, font=('宋体', 20))
        self.content.place(x=100, y=200)
        # buttons
Button(self.window, width=10, text='语音转文字', font=('宋体', 20), fg='white',
command=lambda: self.StoT(), bg='dodgerblue', activebackground='black',
               activeforeground='white').place(x=100, y=600)  # activebackground/activeforeground set the colours while the button is pressed
# Button(self.window, width = 5,text='识别', font=('宋体', 20), fg='white',
# command=self.jumpweb2, bg='dodgerblue', activebackground='black',
# activeforeground='white').place(x = 200, y =600)
Button(self.window, width=10, text='文字转语音', font=('宋体', 20), fg='white',
command=lambda: self.say(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=260, y=600)
Button(self.window, width=21, text='返回', font=('宋体', 20), fg='white',
command=lambda: self.back(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=100, y=650)
Button(self.window, width=5, text='清空', font=('宋体', 20), fg='white',
command=lambda: self.delete_text(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=300, y=350)
self.window.mainloop(0)
def delete_text(self):
self.content.delete(0.0, END)
def insert_text(self, text):
self.content.insert('insert', text)
    def get_adio(self, sec=0):
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=16000,
input=True,
frames_per_buffer=1024)
wf = wave.open('test.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(16000)
print('开始说话')
stopflag = 0
conflag = 0
while True:
data = stream.read(1024)
rt_data = np.frombuffer(data, np.dtype('<i2'))
fft_temp_data = fftpack.fft(rt_data, rt_data.size, overwrite_x=True)
fft_data = np.abs(fft_temp_data)[0:fft_temp_data.size // 2 + 1]
            # print(sum(fft_data) // len(fft_data))
            # decide whether the speaker has gone quiet: compare the average FFT
            # magnitude against the microphone threshold (7000 by default)
if sum(fft_data) // len(fft_data) > 7000:
conflag += 1
else:
stopflag += 1
            oneSecond = int(16000 / 1024)  # number of 1024-sample chunks per second (about 15)
            if stopflag + conflag > oneSecond:  # roughly one second of chunks has been classified
                if stopflag > oneSecond // 3 * 2:  # more than 2/3 of that second was silence, so stop
break
else:
stopflag = 0
conflag = 0
wf.writeframes(data)
print('停止说话')
stream.stop_stream()
stream.close()
p.terminate()
wf.close()
return 'test.wav'
def say(self):
engine = pyttsx3.init()
engine.say(self.content.get(0.0, END))
engine.runAndWait()
def StoT(self):
file = self.get_adio()
        # call the recognizer with: audio data, audio format (wav, pcm or amr),
        # sample rate, and dev_pid (1537 = Mandarin Chinese)
Format = file[-3:]
data = open(file, 'rb').read()
result = self.client.asr(data, Format, 16000, {'dev_pid': 1537})
result = result['result'][0]
self.insert_text(result)
def back(self):
self.window.destroy()
StartPage()
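# A minimal standalone check of the record-then-recognize flow used by ASR.StoT()
# (a sketch, not called anywhere in the program: it assumes an existing 16 kHz mono
# 'test.wav' and your own Baidu speech credentials).
def asr_smoke_test():
    client = AipSpeech('your App ID', 'your API Key', 'your Secret Key')
    with open('test.wav', 'rb') as f:
        audio = f.read()
    result = client.asr(audio, 'wav', 16000, {'dev_pid': 1537})  # 1537 = Mandarin
    print(result.get('result', ['<recognition failed>'])[0])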
'''-----------------------------------------------------------------------------------------------------------------------------------------------------------'''
# Speech-translation page: a class whose TransPage() method builds the UI
class Translate():
def TransPage(self):
        self.api_key = 'your text-translation API Key'
        self.secret_key = 'your text-translation Secret Key'
        self.token = self.get_token(self.api_key, self.secret_key)
        self.ID = 'your speech-recognition App ID'
        self.Key = 'your speech-recognition API Key'
        self.Secret = 'your speech-recognition Secret Key'
        # create the speech client
        self.client = AipSpeech(self.ID, self.Key, self.Secret)  # speech-recognition client
        # create the window
        self.screen = Tk()
        self.screen.resizable(width=False, height=False)
        self.screen.title('语音翻译')  # window title
        self.screen.geometry('500x800')  # window size
        # open the background image and convert it to a Tk-compatible object
        img = Image.open('3.jpg').resize([500, 800])
        self.img = ImageTk.PhotoImage(img)
        # create a canvas, use the image as its background and cover the whole window
        self.canvas = Canvas(self.screen, width=500, height=800)  # canvas width and height
        self.canvas.place(x=0, y=0)
        self.canvas.create_image(250, 400, image=self.img)  # place the image on the canvas (anchored at its centre)
        self.canvas.create_text(250, 100, text='语音翻译', font=('宋体', 40))
        self.canvas.create_text(110, 170, text='原文:', font=('宋体', 20), fill='green')
        self.canvas.create_text(110, 270, text='译文:', font=('宋体', 20), fill='blue')
        # source-text box
        self.original = Text(self.screen, width=22, height=2, font=('宋体', 20))
        self.original.place(x=100, y=200)
        # translation label (earlier version, kept for reference)
        # Label(self.screen, bg='dodgerblue', text='译文:', font=('宋体', 20), fg='white').place(x=100, y=250)
        # translation text box
        self.Translation = Text(self.screen, width=22, height=2, font=('宋体', 20))
        self.Translation.place(x=100, y=300)
        # buttons
Button(self.screen, width=10, text='文本翻译', font=('宋体', 20), fg='white',
command=lambda: self.text_run(), bg='dodgerblue', activebackground='black',
               activeforeground='white').place(x=80, y=600)  # activebackground/activeforeground set the colours while the button is pressed
Button(self.screen, width=10, text='语音翻译', font=('宋体', 20), fg='white',
command=lambda: self.adio_run(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=280, y=600)
Button(self.screen, width=25, text='返回', font=('宋体', 20), fg='white',
command=lambda: self.jumpweb(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=80, y=650)
Button(self.screen, width=5, text='清空', font=('宋体', 20), fg='white',
command=lambda: self.delete_text(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=330, y=380)
self.screen.mainloop(0)
def delete_text(self):
self.original.delete(0.0, END)
self.Translation.delete(0.0, END)
def get_token(self, key, secret):
url = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + key + '&client_secret=' + secret
response = requests.post(url)
result = response.json()
result = result['access_token']
return result
# print(get_token(api_key,secret_key))
    # decide the source language and the target language
def is_eng(self, text):
if ord(text) in range(ord('a'), ord('z') + 1) or ord(text[0]) in range(ord('A'), ord('Z') + 1):
return 'en', 'zh'
else:
return 'zh', 'en'
def translate(self, text, token):
url = 'https://aip.baidubce.com/rpc/2.0/mt/texttrans/v1'
        # query-string parameters: access_token, plus
        # from (source language), to (target language) and q (text to translate)
fr, tr = self.is_eng(text[0])
url = url + '?access_token=' + token + '&from=' + fr + '&to=' + tr + '&q=' + text
response = requests.post(url)
result = response.json()
result = result['result']['trans_result'][0]['dst']
return result
# print(result)
    # text-to-speech playback
def say(self, text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
    def text_run(self):
        text = self.original.get(0.0, END)
        translation = self.translate(text, self.token)  # translate once, reuse for display and playback
        self.Translation.insert('insert', translation)
        self.say(translation)
def get_adio(self, sec=0):
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=16000,
input=True,
frames_per_buffer=1024)
wf = wave.open('test.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(16000)
print('开始说话')
stopflag = 0
conflag = 0
while True:
data = stream.read(1024)
rt_data = np.frombuffer(data, np.dtype('<i2'))
fft_temp_data = fftpack.fft(rt_data, rt_data.size, overwrite_x=True)
fft_data = np.abs(fft_temp_data)[0:fft_temp_data.size // 2 + 1]
            # print(sum(fft_data) // len(fft_data))
            # decide whether the speaker has gone quiet: compare the average FFT
            # magnitude against the microphone threshold (7000 by default)
if sum(fft_data) // len(fft_data) > 7000:
conflag += 1
else:
stopflag += 1
            oneSecond = int(16000 / 1024)  # number of 1024-sample chunks per second (about 15)
            if stopflag + conflag > oneSecond:  # roughly one second of chunks has been classified
                if stopflag > oneSecond // 3 * 2:  # more than 2/3 of that second was silence, so stop
break
else:
stopflag = 0
conflag = 0
wf.writeframes(data)
print('停止说话')
stream.stop_stream()
stream.close()
p.terminate()
wf.close()
return 'test.wav'
def StoT(self):
file = self.get_adio()
        # call the recognizer with: audio data, audio format (wav, pcm or amr),
        # sample rate, and dev_pid (1537 = Mandarin Chinese)
Format = file[-3:]
data = open(file, 'rb').read()
result = self.client.asr(data, Format, 16000, {'dev_pid': 1537})
result = result['result'][0]
# print(result)
return result
    def adio_run(self):
        text = self.StoT()
        self.original.insert('insert', text)
        translation = self.translate(text, self.token)  # translate once, reuse for display and playback
        self.Translation.insert('insert', translation)
        self.say(translation)
def jumpweb(self):
self.screen.destroy()
StartPage()
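# A minimal standalone check of the same text-translation request that
# Translate.translate() sends (a sketch, not called anywhere in the program: it
# assumes a valid access_token, e.g. from Translate.get_token()).
def translate_smoke_test(text, token, src='zh', dst='en'):
    url = ('https://aip.baidubce.com/rpc/2.0/mt/texttrans/v1'
           '?access_token=' + token + '&from=' + src + '&to=' + dst + '&q=' + text)
    result = requests.post(url).json()
    return result['result']['trans_result'][0]['dst']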
'''-----------------------------------------------------------------------------------------------------------------------------------------------------------'''
# Face-detection page: a class whose FacePage() method builds the UI
class Face_Detection():
def FacePage(self):
        # create the window
        self.interface = Tk()
        self.interface.resizable(width=False, height=False)
        self.interface.title('人脸检测')  # window title
        self.interface.geometry('500x800')  # window size
        # open the background image and convert it to a Tk-compatible object
        IMG = Image.open('4.jpg').resize([500, 800])
        self.IMG = ImageTk.PhotoImage(IMG)
        # create a canvas, use the image as its background and cover the whole window
        self.canvas = Canvas(self.interface, width=500, height=800)  # canvas width and height
        self.canvas.place(x=0, y=0)
        self.canvas.create_image(250, 400, image=self.IMG)  # place the image on the canvas (anchored at its centre)
        self.canvas.create_text(250, 100, text='人脸检测', font=('宋体', 40), fill='white')
        # buttons
cam = Image.open('cam.png').resize([70, 70])
self.cam = ImageTk.PhotoImage(cam)
Button(self.interface, image=self.cam, font=('宋体', 20), fg='white',
command=lambda: self.Camera(), bg='dodgerblue', activebackground='black',
               activeforeground='white').place(x=120, y=600)  # activebackground/activeforeground set the colours while the button is pressed
pic = Image.open('pic.png').resize([70, 70])
self.pic = ImageTk.PhotoImage(pic)
Button(self.interface, image=self.pic, font=('宋体', 20), fg='white',
command=lambda: self.OpenPhoto(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=220, y=600)
face = Image.open('face.png').resize([70, 70])
self.face = ImageTk.PhotoImage(face)
Button(self.interface, image=self.face, font=('宋体', 20), fg='white',
command=lambda: self.Detect(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=320, y=600)
Button(self.interface, width=19, text='返回', font=('宋体', 20), fg='white',
command=lambda: self.back(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=120, y=700)
self.interface.mainloop(0)
    # client_id is the API Key (AK) and client_secret is the Secret Key (SK) from the Baidu console
    def token(self):
        api_key = 'your face-detection API Key'
        secret_key = 'your face-detection Secret Key'
        # token endpoint: pass the API Key and Secret Key as query parameters
        host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + api_key + '&client_secret=' + secret_key
        # send a POST request and read the response
        response = requests.post(url=host)
        # parse the response body as JSON
        result = response.json()
        access_token = result['access_token']
        return access_token
    def Camera(self):
        cap = cv2.VideoCapture(0)  # 0 = built-in camera, 1 = external camera
        img_name = None
        while True:
            success, img = cap.read()
            cv2.imshow('Capture', img)
            key = cv2.waitKey(1)
            if key in [27, ord('q')]:  # Esc or q: quit without capturing
                break
            if key in [ord('c'), 13]:  # c or Enter: save the current frame
                img_name = '%s/%s.jpg' % ('IMG', 'cam')
                cv2.imwrite(img_name, img)
                f = open(img_name, 'rb').read()
                # base64-encode the bytes for the detection request
                self.img = base64.b64encode(f).decode('utf-8')
                break
        cap.release()
        cv2.destroyAllWindows()
        if img_name:
            self.SetImg(img_name)
    def OpenPhoto(self):
        root = tkinter.Tk()  # temporary window for the file dialog
        root.withdraw()      # hide it
        # pick an image file through the file dialog
        file_path = tkinter.filedialog.askopenfilename(title='选择文件')
        # (showing the picked image with matplotlib is left commented out)
        pic = Image.open(file_path)
        plt.axis('off')
        # plt.imshow(pic)
        # plt.show()
        # read the file as bytes, then base64-encode it for the detection request
        f = open(file_path, 'rb').read()
        self.img = base64.b64encode(f).decode('utf-8')
        # print(img)
        root.destroy()
        self.SetImg(file_path)
    def SetImg(self, photo_path):
        # open the chosen image and convert it to a Tk-compatible object
        image = Image.open(photo_path).resize([250, 340])
        self.image = ImageTk.PhotoImage(image)
        # a smaller canvas that previews the image inside the window
        self.canvas = Canvas(self.interface, width=250, height=340)  # canvas width and height
        self.canvas.place(x=130, y=130)
        self.canvas.create_image(125, 170, image=self.image)  # place the image at the canvas centre
    def Detect(self):
        # face-detection endpoint
        URL = 'https://aip.baidubce.com/rest/2.0/face/v3/detect'
        # append the access_token as a query parameter
        URL = URL + '?access_token=' + self.token()
        # the image data was prepared by Camera() or OpenPhoto()
        # img = self.OpenPhoto()
        # request parameters
params = {
'image': self.img,
'image_type': 'BASE64',
'face_field': 'age,gender,beauty,expression'
}
        # send the POST request
response = requests.post(url=URL, data=params)
result = response.json()
# print(result)
result = result['result']['face_list'][0]
text = ('年龄:' + str(result['age']) + ' 性别:' + str(result['gender']['type']) + '\n' + '颜值:' + str(
result['beauty']) + ' 表情:' + str(result['expression']['type']))
# print('年龄:', result['age'])
# print('性别:', result['gender']['type'])
# print('颜值:', result['beauty'])
# print('表情:', result['expression']['type'])
        self.canvas = Canvas(self.interface, width=250, height=60)  # canvas width and height
self.canvas.place(x=130, y=480)
self.canvas.create_text(115, 30, text=text, font=('宋体', 15), fill='black')
def back(self):
self.interface.destroy()
StartPage()
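# A minimal standalone check of the same face-detection request that
# Face_Detection.Detect() sends (a sketch, not called anywhere in the program:
# img_b64 is a base64-encoded image string and token a valid access_token).
def face_smoke_test(img_b64, token):
    url = 'https://aip.baidubce.com/rest/2.0/face/v3/detect?access_token=' + token
    params = {'image': img_b64, 'image_type': 'BASE64',
              'face_field': 'age,gender,beauty,expression'}
    face = requests.post(url=url, data=params).json()['result']['face_list'][0]
    return face['age'], face['gender']['type'], face['beauty'], face['expression']['type']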
'''-----------------------------------------------------------------------------------------------------------------------------------------------------------'''
# Chatbot page: a class whose UnitPage() method builds the UI
class Unit_Robot():
def UnitPage(self):
        self.ID = 'your speech-recognition App ID'
        self.Key = 'your speech-recognition API Key'
        self.Secret = 'your speech-recognition Secret Key'
        # create the speech client
        self.client = AipSpeech(self.ID, self.Key, self.Secret)  # speech-recognition client
        # create the window
        self.page = Tk()
        self.page.resizable(width=False, height=False)
        self.page.title('聊天机器人')  # window title
        self.page.geometry('500x800')  # window size
        # open the background image and convert it to a Tk-compatible object
        img = Image.open('5.jpg').resize([500, 800])
        self.img = ImageTk.PhotoImage(img)
        # create a canvas, use the image as its background and cover the whole window
        self.canvas = Canvas(self.page, width=500, height=800)  # canvas width and height
        self.canvas.place(x=0, y=0)
        self.canvas.create_image(250, 400, image=self.img)  # place the image on the canvas (anchored at its centre)
        self.canvas.create_text(250, 100, text='聊天机器人', font=('宋体', 40))
        self.canvas.create_text(110, 170, text='内容:', font=('宋体', 20), fill='green')
        self.canvas.create_text(110, 270, text='回答:', font=('宋体', 20), fill='blue')
        # text box for what you said
        self.text = Text(self.page, width=22, height=2, font=('宋体', 20))
        self.text.place(x=100, y=200)
        # text box for the bot's reply
        self.Reply = Text(self.page, width=22, height=2, font=('宋体', 20))
        self.Reply.place(x=100, y=300)
        # buttons
Button(self.page, width=8, text='聊天', font=('宋体', 20), fg='white',
command=lambda: self.adio_run(), bg='dodgerblue', activebackground='black',
               activeforeground='white').place(x=200, y=600)  # activebackground/activeforeground set the colours while the button is pressed
Button(self.page, width=8, text='返回', font=('宋体', 20), fg='white',
command=lambda: self.back(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=200, y=650)
Button(self.page, width=5, text='清空', font=('宋体', 20), fg='white',
command=lambda: self.delete_text(), bg='dodgerblue', activebackground='black',
activeforeground='white').place(x=330, y=380)
self.page.mainloop(0)
def delete_text(self):
self.text.delete(0.0, END)
self.Reply.delete(0.0, END)
def token(self):
        Key = 'your UNIT chatbot API Key'
        Secret = 'your UNIT chatbot Secret Key'
url = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=' + Key + '&client_secret=' + Secret
result = requests.post(url)
result = result.json()
access_token = result['access_token']
return access_token
def get_adio(self, sec=0):
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16,
channels=1,
rate=16000,
input=True,
frames_per_buffer=1024)
wf = wave.open('test.wav', 'wb')
wf.setnchannels(1)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(16000)
print('开始说话')
stopflag = 0
conflag = 0
while True:
data = stream.read(1024)
rt_data = np.frombuffer(data, np.dtype('<i2'))
fft_temp_data = fftpack.fft(rt_data, rt_data.size, overwrite_x=True)
fft_data = np.abs(fft_temp_data)[0:fft_temp_data.size // 2 + 1]
            # print(sum(fft_data) // len(fft_data))
            # decide whether the speaker has gone quiet: compare the average FFT
            # magnitude against the microphone threshold (7000 by default)
if sum(fft_data) // len(fft_data) > 7000:
conflag += 1
else:
stopflag += 1
            oneSecond = int(16000 / 1024)  # number of 1024-sample chunks per second (about 15)
            if stopflag + conflag > oneSecond:  # roughly one second of chunks has been classified
                if stopflag > oneSecond // 3 * 2:  # more than 2/3 of that second was silence, so stop
break
else:
stopflag = 0
conflag = 0
wf.writeframes(data)
print('停止说话')
stream.stop_stream()
stream.close()
p.terminate()
wf.close()
return 'test.wav'
def say(self, text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
def StoT(self):
file = self.get_adio()
        # call the recognizer with: audio data, audio format (wav, pcm or amr),
        # sample rate, and dev_pid (1537 = Mandarin Chinese)
Format = file[-3:]
data = open(file, 'rb').read()
result = self.client.asr(data, Format, 16000, {'dev_pid': 1537})
result = result['result'][0]
# print(result)
return result
def adio_run(self):
self.delete_text()
text = self.StoT()
self.text.insert('insert', text)
# print('原文:', text)
reply = self.Unit(text)
self.Reply.insert('insert', reply)
# print('回答内容:', reply)
self.say(reply)
def Unit(self, chat):
url = 'https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat'
url = url + '?access_token=' + self.token()
params = {
'version': '3.0',
'service_id': 'S60564',
'log_id': str(random.random()),
'session_id': '',
'request': {'terminal_id': '123456', 'query': chat},
}
response = requests.post(url=url, json=params)
result = response.json()
        # response structure: result -> responses -> actions -> (confidence, say)
        # handle API errors
if result['error_code'] != 0:
return '网页正忙'
result = result['result']['responses'][0]['actions']
reply_act = random.choice([conf for conf in result if conf['confidence'] > 0])
reply = reply_act['say']
# print(reply)
return reply
def back(self):
self.page.destroy()
StartPage()
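# A minimal standalone check of the same UNIT chat request that Unit_Robot.Unit()
# sends (a sketch, not called anywhere in the program: it assumes your own UNIT
# service_id and a valid access_token for the chatbot application).
def unit_smoke_test(query, token, service_id='S60564'):
    url = 'https://aip.baidubce.com/rpc/2.0/unit/service/v3/chat?access_token=' + token
    params = {'version': '3.0', 'service_id': service_id,
              'log_id': str(random.random()), 'session_id': '',
              'request': {'terminal_id': '123456', 'query': query}}
    result = requests.post(url=url, json=params).json()
    if result.get('error_code', 0) != 0:
        return '网页正忙'
    actions = result['result']['responses'][0]['actions']
    return random.choice([a for a in actions if a['confidence'] > 0])['say']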
if __name__ == '__main__':
StartPage()
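One practical note: besides the four sets of Baidu credentials, the program expects a few local assets next to the script, namely the background images 1.jpg through 5.jpg, the button icons cam.png, pic.png and face.png, and an IMG directory for the webcam snapshot. It also needs pyaudio, baidu-aip (the aip module), opencv-python (cv2), Pillow, pyttsx3, scipy, numpy, requests and matplotlib installed. The UNIT service_id used above ('S60564') belongs to my own bot; you have to create your own skill in the UNIT console and substitute its id.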