百度语音识别通过 REST API 的方式给开发者提供一个通用的 HTTP 接口,基于该接口,开发者可以轻松地获取语音识别能力。SDK中只提供了PHP、C和JAVA的相关样例,然而个人以为,使用Python开发难度更低,本文给出使用Python调用百度语音识别服务 REST API 的简单样例。
注册开发者账号和创建应用不再赘述,百度的REST API在调用过程基本分为三步:
- 获取token
- 提交数据
- 处理JSON
获取token
#Api_Key申请的api_key
#Secrect_Key对应的secret_key
url = 'https://openapi.baidu.com/oauth/2.0/token?grant_type=client_credentials&client_id='+Api_Key+'&client_secret='+Secrect_Key
res = urllib2.urlopen(url).read()
data = json.loads(res)
token = data['access_token'] #获取的token
print 'token获取成功:'+`token`
提交数据
# Step 2: upload the wav audio to the ASR REST endpoint.
VOICE_RATE = 8000        # sample rate of the recording (Hz)
WAVE_FILE = 'temp.wav'   # audio file to recognize
USER_ID = 'duvoice'      # unique user/device identifier (cuid)
WAVE_TYPE = 'wav'        # audio format
# Other request parameters: see the SDK documentation.
# Bug fix: the snippet referenced an undefined self.url (NameError).  Bind
# the documented Baidu ASR endpoint explicitly.
url = 'http://vop.baidu.com/server_api'
# Bug fix: open in binary mode -- text mode ('r') corrupts wav bytes on
# platforms with text-mode I/O; 'with' guarantees the file is closed.
with open(WAVE_FILE, 'rb') as f:
    speech = base64.b64encode(f.read())
size = os.path.getsize(WAVE_FILE)  # length of the raw (pre-base64) audio
update = json.dumps({'format': WAVE_TYPE, 'rate': VOICE_RATE, 'channel': 1,
                     'cuid': USER_ID, 'token': token,
                     'speech': speech, 'len': size})
r = urllib2.urlopen(url, update)
处理JSON
t = r.read()
result = json.loads(t)
if result['err_msg']=='success.':
word = result['result'][0].encode('utf-8')
if word!='':
if word[len(word)-3:len(word)]==',':
print word[0:len(word)-3]
return word[0:len(word)-3]
else:
print word
return word
else:
print "音频文件不存在或格式错误"
return ''
else:
print "错误"
经过以上步骤便可提交一个wav文件进行解析,同时也可使用python的pyaudio库进行录音:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyaudio import PyAudio, paInt16
import numpy as np
from datetime import datetime
import wave
class recoder:
NUM_SAMPLES = 2000 #pyaudio内置缓冲大小
SAMPLING_RATE = 8000 #取样频率
LEVEL = 500 #声音保存的阈值
COUNT_NUM = 20 #NUM_SAMPLES个取样之内出现COUNT_NUM个大于LEVEL的取样则记录声音
SAVE_LENGTH = 8 #声音记录的最小长度:SAVE_LENGTH * NUM_SAMPLES 个取样
TIME_COUNT = 60 #录音时间,单位s
Voice_String = []
def savewav(self,filename):
wf = wave.open(filename, 'wb')
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(self.SAMPLING_RATE)
wf.writeframes("".join(self.Voice_String))
wf.close()
def recoder(self):
pa = PyAudio()
stream = pa.open(format=paInt16, channels=1, rate=self.SAMPLING_RATE, input=True,
frames_per_buffer=self.NUM_SAMPLES)
save_count = 0
save_buffer = []
time_count = self.TIME_COUNT
while True:
time_count -= 1
# print time_count
# 读入NUM_SAMPLES个取样
string_audio_data = stream.read(self.NUM_SAMPLES)
# 将读入的数据转换为数组
audio_data = np.fromstring(string_audio_data, dtype=np.short)
# 计算大于LEVEL的取样的个数
large_sample_count = np.sum( audio_data > self.LEVEL )
print np.max(audio_data)
# 如果个数大于COUNT_NUM,则至少保存SAVE_LENGTH个块
if large_sample_count > self.COUNT_NUM:
save_count = self.SAVE_LENGTH
else:
save_count -= 1
if save_count < 0:
save_count = 0
if save_count > 0 :
# 将要保存的数据存放到save_buffer中
#print save_count > 0 and time_count >0
save_buffer.append( string_audio_data )
else:
#print save_buffer
# 将save_buffer中的数据写入WAV文件,WAV文件的文件名是保存的时刻
#print "debug"
if len(save_buffer) > 0 :
self.Voice_String = save_buffer
save_buffer = []
print "Recode a piece of voice successfully!"
return True
if time_count==0:
if len(save_buffer)>0:
self.Voice_String = save_buffer
save_buffer = []
print "Recode a piece of voice successfully!"
return True
else:
return False
|