import json
import time

from config import simplemodel
from fallback import fallback
from template import xuanxiang

# Mapping from label codes (e.g. '17-177') to standard item names, loaded once.
with open('name_label', 'r', encoding='utf-8') as f:
    name_label = json.load(f)

def aifilter1(A,          # candidate option texts
              B,          # work-item data
              aiclient,
              qwclient,
              sfclient,
              dw):
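    """Prune the candidate options in A according to the special-handling rules
    stated in the prompt (interior vs. exterior wall paint, putty mentions) and
    return the surviving option texts as a list.

    B is expected to carry the 'label', 'mc' and 'tz' fields used to build the
    prompt; aiclient runs the main model, and sfclient runs the lightweight
    model (via simplemodel()/xuanxiang()) that extracts the letter list when
    the first reply is not already in "[A, B, C]" form. qwclient and dw are
    accepted for a uniform call signature but are not used in this function.
    """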
    options = []
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(A)):
        options.append("给定选项" + letters[i] + ",内容为" + A[i])
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        #model="THUDM/GLM-Z1-9B-0414",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求一:如果工作内容提及的是内墙,则去掉所有精确含有“外墙涂料”字样的选项"},
            {"role": "user", "content": " 特殊处理要求二:如果工作内容没有明确提及抗裂腻子,则去掉所有精确含有“抗裂腻子”字样的选项"},
            {"role": "user", "content": " 特殊处理要求三:如果工作内容没有明确提及批腻子,则去掉所有精确含有“外墙批抗裂腻子”字样的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    # Streaming variant kept for reference:
    #done_thinking = False
    #json_string = ""
    #thinking_json_string = ""
    #for chunk in completion:
    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
    #    answer_chunk = chunk.choices[0].delta.content
    #    if thinking_chunk != '':
    #        thinking_json_string = thinking_json_string + thinking_chunk
    #    elif answer_chunk != '':
    #        if not done_thinking:
    #            done_thinking = True
    #        json_string = json_string + answer_chunk
    json_string = completion.choices[0].message.content
    #print(completion.choices[0].message.reasoning_content)
    print(json_string)
    # If the reply is already essentially a bare letter list such as "[A, C, D]"
    # (fewer than five characters that are not letters, commas, brackets or
    # spaces), map the letters straight back to the original options.
    if len([x for x in json_string
            if x != ',' and x != '[' and x != ']' and x != ' '
            and (x < 'A' or x > 'M')]) < 5:
        answer = []
        for i, option in enumerate(A):
            if i < len(letters) and letters[i] in json_string:
                answer.append(option)
        return answer
    # Otherwise ask the lightweight model to pull the letter list out of the
    # free-form reply, then map the letters back to options.
    completion = sfclient.chat.completions.create(
        #model="glm-4.5-flash",
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": False},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    answer = []
    for i, option in enumerate(A):
        if i < len(letters) and letters[i] in json_string:
            answer.append(option)
    return answer

def handle_nizi(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
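    """Ask the model which putty (腻子) treatment the work item describes and
    return a single letter: 'A' no putty, 'B' putty, 'C' insulation putty,
    'D' two coats of putty, 'E' three coats of putty. Only `data`, `aiclient`
    and `sfclient` are used; the remaining parameters keep the handler
    signatures uniform."""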
    options = []
    options_ = []
    options.append('不使用腻子')
    options.append('使用腻子')
    options.append('使用保温腻子')
    options.append('腻子2道(遍)')
    options.append('腻子3道(遍)')
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(options)):
        options_.append("给定选项" + letters[i] + ",内容为" + options[i])
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        #model="THUDM/GLM-Z1-9B-0414",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容;"+ ",".join(options_) + "。请选出与工作内容最匹配的选项。例如,如果工作内容中不使用腻子,则输出A。再例如,如果工作内容中提到刷腻子2遍,则输出D.现在给定一段工作内容: "+ data['label'] + " " + data['mc'] + " " + data['tz'] + "\n请给出分析过程并请输出A、B这样的字母作为答案"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    json_string = completion.choices[0].message.content
    #print(completion.choices[0].message.reasoning_content)
    print(json_string)
    if len(json_string) < 5:
        # Bare-letter reply; the last matching letter wins.
        answer = 'A'
        for letter in 'BCDE':
            if letter in json_string:
                answer = letter
    else:
        # Ask the lightweight model to extract the answer letter from the
        # free-form reply.
        completion = sfclient.chat.completions.create(
            #model="glm-4.5-flash",
            model=simplemodel(),
            messages=[
                {"role": "system", "content": "You are a helpful assistant.请将结果以JSON格式输出"},
                {"role": "user", "content": "问题描述: 给定一段内容: " + json_string + "。文字中给出了一个类似于A、B的字母作为答案,请输出这个答案。不需要输出分析过程"},
            ],
            extra_body={"thinking": {"type": "disabled"}},
            #extra_body={"enable_thinking": False},
        )
        json_string = completion.choices[0].message.content
        print(json_string)
        answer = 'A'
        for letter in 'BCDE':
            if letter in json_string:
                answer = letter
    return answer

def handle_neiqiang(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
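    """Classify an interior-wall (内墙) coating item into one of seven coating
    types and return the matching name_label entry as a one-element list.
    Option G (水性防霉涂料) is refined further via handle_nizi to choose
    between the with-putty and without-putty variants."""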
    options = []
    options_ = []
    options.append('调和漆')
    options.append('乳胶漆(水性水泥漆)')
    options.append('砂胶喷涂')
    options.append('多彩涂料喷涂')    # complete set
    options.append('浮雕喷涂料')      # complete set
    options.append('喷刷白水泥浆、石灰浆、石灰大白浆')  # complete set
    options.append('水性防霉涂料')
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(options)):
        options_.append("给定选项" + letters[i] + ",内容为" + options[i])
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        #model="THUDM/GLM-Z1-9B-0414",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ",".join(options_) + "。请选出与工作内容最匹配的涂料选项。如果没有特别匹配的选项,则默认选择选项B。请给出你的选择,请输出A、B这样的字母作为答案"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    json_string = completion.choices[0].message.content
    #print(completion.choices[0].message.reasoning_content)
    print(json_string)
    if len(json_string) < 5:
        # Bare-letter reply; the last matching letter wins.
        answer = 'A'
        for letter in 'BCDEFG':
            if letter in json_string:
                answer = letter
    else:
        # Extract the answer letter from the free-form reply.
        completion = sfclient.chat.completions.create(
            #model="glm-4.5-flash",
            model=simplemodel(),
            messages=[
                {"role": "system", "content": "You are a helpful assistant.请将结果以JSON格式输出"},
                {"role": "user", "content": "问题描述: 给定一段内容: " + json_string + "。文字中给出了一个类似于A、B的字母作为答案,请输出这个答案。不需要输出分析过程"},
            ],
            extra_body={"thinking": {"type": "disabled"}},
            #extra_body={"enable_thinking": False},
        )
        json_string = completion.choices[0].message.content
        print(json_string)
        answer = 'A'
        for letter in 'BCDEFG':
            if letter in json_string:
                answer = letter
    # Map the chosen letter to the corresponding name_label entry.
    if answer == 'A':
        return [name_label['17-160']]
    if answer == 'B':
        return [name_label['17-177']]
    if answer == 'C':
        return [name_label['17-205']]
    if answer == 'D':
        return [name_label['17-210']]
    if answer == 'E':
        return [name_label['17-220']]
    if answer == 'F':
        return [name_label['17-224']]
    if answer == 'G':
        nizi = handle_nizi(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
        if nizi == 'A':
            return [name_label['17-CB5']]
        return [name_label['17-CB4']]  # full putty skim coat

def handle_waiqiang(nizi, selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
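    """Classify an exterior-wall (外墙) coating item into one of nine coating
    types and return the matching name_label entry followed by the putty
    entries passed in as `nizi` (option E, putty only, is returned on its
    own)."""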
    options = []
    options_ = []
    options.append('外墙丙烯酸弹性乳胶漆')  # excludes putty; two coats
    options.append('外墙苯丙乳胶漆')        # excludes putty; two coats
    options.append('外墙溶剂涂料')          # complete set
    options.append('外墙弹性涂料')          # includes two coats of putty; one primer, two finish coats
    options.append('外墙批抗裂腻子(不刷涂料)')
    options.append('外墙彩砂喷涂')          # includes two coats of putty; two sprayed finish coats
    options.append('喷涂外墙乳液型涂料')    # complete set
    options.append('外墙真石漆')            # complete set
    options.append('浮雕喷涂料外墙')        # complete set
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(options)):
        options_.append("给定选项" + letters[i] + ",内容为" + options[i])
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        #model="THUDM/GLM-Z1-9B-0414",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ",".join(options_) + "。请选出与工作内容最匹配的外墙涂料选项。如果工作内容只提及外墙涂料,没有更细节的介绍,则选择选项D。请给出你的选择,请输出A、B这样的字母作为答案"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    json_string = completion.choices[0].message.content
    #print(completion.choices[0].message.reasoning_content)
    print(json_string)
    if len(json_string) < 5:
        # Bare-letter reply; the last matching letter wins.
        answer = 'A'
        for letter in 'BCDEFGHI':
            if letter in json_string:
                answer = letter
    else:
        # Extract the answer letter from the free-form reply.
        completion = sfclient.chat.completions.create(
            #model="glm-4.5-flash",
            model=simplemodel(),
            messages=[
                {"role": "system", "content": "You are a helpful assistant.请将结果以JSON格式输出"},
                {"role": "user", "content": "问题描述: 给定一段内容: " + json_string + "。文字中给出了一个类似于A、B的字母作为答案,请输出这个答案。不需要输出分析过程"},
            ],
            extra_body={"thinking": {"type": "disabled"}},
            #extra_body={"enable_thinking": False},
        )
        json_string = completion.choices[0].message.content
        print(json_string)
        answer = 'A'
        for letter in 'BCDEFGHI':
            if letter in json_string:
                answer = letter
    # Map the chosen letter to the corresponding name_label entries.
    if answer == 'A':
        return [name_label['NT17-补22']] + nizi
    if answer == 'B':
        return [name_label['17-192']] + nizi
    if answer == 'C':
        return [name_label['17-199']] + nizi
    if answer == 'D':
        return [name_label['17-197']] + nizi
    if answer == 'E':
        return [name_label['17-195']]
    if answer == 'F':
        return [name_label['17-202']] + nizi
    if answer == 'G':
        return [name_label['17-207']] + nizi
    if answer == 'H':
        return [name_label['17-218']] + nizi
    if answer == 'I':
        return [name_label['17-222']] + nizi

def handle_mohui(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
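    """Dispatch a plaster-surface coating item: if the work item mentions
    exterior walls (外墙), first resolve the putty treatment with handle_nizi
    and then classify the coating with handle_waiqiang; otherwise classify it
    as an interior-wall coating with handle_neiqiang."""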
    if '外墙' in data['tz']:
        result = []
        nizi = handle_nizi(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
        if nizi == 'C':
            result = [name_label['14-CB1(1)']]
        if nizi == 'D':
            result = [name_label['17-164']]
        if nizi == 'E':
            result = [name_label['17-164']]
        return handle_waiqiang(result, selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
    return handle_neiqiang(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)

def postprocess0114(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
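    """Post-process the selected candidates for item 0114: count how many of
    them fall under sections 17.1.1, 17.1.2 and 17.1.3 (the mu, jinshu and
    mohui counters). If the 17.1.3 group dominates, hand the item to
    handle_mohui; otherwise prune the candidates with aifilter1 and, if nothing
    survives, fall back to the generic fallback() selection."""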
    mu = 0
    jinshu = 0
    mohui = 0
    for entry in selected:
        if '17.1.1' in entry:
            mu = mu + 1
        if '17.1.2' in entry:
            jinshu = jinshu + 1
        if '17.1.3' in entry:
            mohui = mohui + 1
    if mohui > mu and mohui > jinshu:
        return handle_mohui(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
    if len(prime) == 0:
        selected = fallback(candidates, data, aiclient, qwclient, sfclient, None, None)
        return selected
    return prime
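
# --- Usage sketch (illustrative only, not part of the original module) ---
# Assumes aiclient/qwclient/sfclient are OpenAI-compatible chat clients (for
# example openai.OpenAI instances pointed at different endpoints), that `data`
# carries the 'label', 'mc' and 'tz' fields used above, and that `selected`,
# `candidates`, `label_name` and `name_dw` follow the structures defined
# elsewhere in this project.
#
#   from openai import OpenAI
#   aiclient = OpenAI(base_url="<main-model-endpoint>", api_key="<key>")
#   sfclient = OpenAI(base_url="<lightweight-model-endpoint>", api_key="<key>")
#   qwclient = OpenAI(base_url="<qwen-endpoint>", api_key="<key>")
#   data = {'label': '墙面涂料', 'mc': '外墙弹性涂料', 'tz': '外墙批腻子两遍,刷弹性涂料'}
#   result = postprocess0114(selected, data, aiclient, qwclient, sfclient,
#                            label_name, name_dw, candidates)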