async def predict(items: Audio):
    """Transcribe the audio file referenced by ``items.file``.

    Downloads the audio bytes, runs the long-form ASR model (with optional
    hotword biasing from ``items.hotword``) and returns a list of segment
    dicts of the form ``{"text": str, "start": float, "end": float}`` with
    times converted from model milliseconds to seconds.

    On inference failure, or when the result carries no ``"sentences"``
    (e.g. silent audio), a single empty placeholder segment is returned
    instead of propagating the error.
    """
    decoded_data = requests.get(items.file).content

    load_model(model_type="long", hotword=items.hotword)

    result = []
    rec_result = None
    try:
        rec_result = loaded_model["model"](audio_in=decoded_data)
        log.info(rec_result)
    except Exception:  # was a bare except; keep best-effort but stay narrow
        log.info("pass small file")

    if rec_result is None:
        # Inference failed: emit exactly one placeholder segment.
        # (Previously the unbound rec_result raised NameError in the loop
        # below, which the second bare except also caught — producing a
        # spurious second placeholder and a misleading "mute file" log.)
        result.append({"text": "", "start": 0.0, "end": 0.0})
    else:
        try:
            for sentence in rec_result["sentences"]:
                result.append(
                    {
                        "text": sentence["text"],
                        # model timestamps are in milliseconds
                        "start": sentence["start"] / 1000.0,
                        "end": sentence["end"] / 1000.0,
                    }
                )
        except (KeyError, TypeError):
            # No usable "sentences" in the result — treat as silent audio.
            result.append({"text": "", "start": 0.0, "end": 0.0})
            log.info("mute file")

    log.info(result)
    return result
@@ -125,7 +145,7 @@ async def health_check():
125145@app .get ("/health/inference" )
126146async def health_check ():
127147 try :
128- load_model (model_type = "normal " , hotword = None )
148+ load_model (model_type = "long " , hotword = None )
129149 rec_result = loaded_model ["model" ](audio_in = "./16000_001.wav" )
130150 log .info ("health 200" )
131151 return status .HTTP_200_OK
0 commit comments