import json

from fallback import fallback
from config import simplemodel
from template import xuanxiang

# Load the matching rules and the code-to-label lookup table.
# Both files are assumed to be UTF-8 encoded JSON documents; the rules object
# must provide at least the 'wumian' and 'loumian' collections used by
# aifilter2 below.
with open('zhaoping_rule', 'r', encoding='utf-8') as f:
    obj = json.load(f)
with open('name_label', 'r', encoding='utf-8') as f:
    name_label = json.load(f)

# Chapter-10 codes (not referenced in this module).
baohuceng = ['10-74', '10-75', '10-77', '10-78', '10-80', '10-81', '10-83', '10-84', '10-86', '10-87', '10-90']

# Label pairs from chapter 14, each of the form [substitute, selected]: when the
# second label was selected and aifilter6 confirms the work description mentions
# 混凝土墙、柱面免除抹灰, postprocess0112 swaps it for the first label.
pair = [
    [name_label['14-1'], name_label['14-2']],
    [name_label['14-8'], name_label['14-10']],
    [name_label['14-9'], name_label['14-11']],
    [name_label['14-25'], name_label['14-26']],
    [name_label['14-37'], name_label['14-39']],
    [name_label['14-38'], name_label['14-40']],
    [name_label['14-48'], name_label['14-49']],
    [name_label['14-50'], name_label['14-52']],
    [name_label['14-51'], name_label['14-53']],
]
def aifilter5(A,  # options (candidate labels)
              B,  # data (work-content record)
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Ask whether any of the given options is a 龙骨 option.

    Returns False only when the model answers 没有; otherwise True.
    """
    options = []
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(A)):
        options.append("给定选项" + letters[i] + ",内容为" + A[i])
    completion = aiclient.chat.completions.create(
        #model="THUDM/GLM-4-9B-0414",
        model="glm-4.5-air",
        #model="Qwen/Qwen3-8B",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": "问题描述: " + ",".join(options) + "。请问选项中是否有龙骨选项?" + '''
如果有,请回答
{
'answer': '有'
}
如果没有,请回答
{
'answer': '没有'
}
'''
             },
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    # A short reply is taken at face value.
    if len(json_string.replace(" ", "")) < 10:
        if '没有' in json_string:
            return False
        return True
    # A verbose reply is condensed by a simpler model into a plain 有/没有 verdict.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“有”或者“没有”的判断,请将该中文判断输出" + '''
如果有,请回答
{
'answer': '有'
}
如果没有,请回答
{
'answer': '没有'
}
你只需要输出结果,不要输出分析过程
'''
             },
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": False},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    if '没有' in json_string:
        return False
    return True
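
# --- Optional sketch, not wired into the functions above ---------------------
# aifilter5/6/3 decide the verdict with bare substring checks such as
# `'没有' in json_string`.  A slightly stricter variant is to first try to pull
# the requested {'answer': ...} field out of the reply and only fall back to
# keywords if that fails.  `extract_answer` and its regex are an illustrative
# assumption, not part of the original pipeline.
import re


def extract_answer(reply):
    """Return the value of the 'answer' field in a model reply, or '' if absent."""
    match = re.search(r"['\"]answer['\"]\s*:\s*['\"]([^'\"]*)['\"]", reply)
    return match.group(1) if match else ''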
def aifilter6(A,  # options (candidate labels)
              B,  # data (work-content record)
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Ask whether the work description mentions that concrete wall/column
    faces are exempt from plastering (混凝土墙、柱面免除抹灰).

    Returns False only when the model answers 没有; otherwise True.
    """
    options = []
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(A)):
        options.append("给定选项" + letters[i] + ",内容为" + A[i])
    completion = aiclient.chat.completions.create(
        #model="THUDM/GLM-4-9B-0414",
        model="glm-4.5-air",
        #model="Qwen/Qwen3-8B",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + "。请问该工作内容的描述中有提及混凝土墙、柱面免除抹灰吗?" + '''
如果有提及,请回答
{
'answer': '有'
}
如果没有提及,请回答
{
'answer': '没有'
}
'''
             },
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    # A short reply is taken at face value.
    if len(json_string.replace(" ", "")) < 10:
        if '没有' in json_string:
            return False
        return True
    # A verbose reply is condensed by a simpler model into a plain 有/没有 verdict.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“有”或者“没有”的判断,请将该中文判断输出" + '''
如果有,请回答
{
'answer': '有'
}
如果没有,请回答
{
'answer': '没有'
}
你只需要输出结果,不要输出分析过程
'''
             },
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": False},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    if '没有' in json_string:
        return False
    return True
def aifilter3(A,  # options (candidate labels)
              B,  # data (work-content record)
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Ask whether the work description refers to an interior or exterior wall.

    Returns False when the model answers 外墙; True for 内墙 or 不确定.
    """
    options = []
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(A)):
        options.append("给定选项" + letters[i] + ",内容为" + A[i])
    completion = aiclient.chat.completions.create(
        #model="THUDM/GLM-4-9B-0414",
        model="glm-4.5-air",
        #model="Qwen/Qwen3-8B",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": "问题描述: 墙面装饰工程可分为外墙装饰或者内墙装饰。给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + "。请问该工作内容的描述指的是内墙还是外墙?" + '''
如果是外墙,请回答
{
'answer': '外墙'
}
如果是内墙,请回答
{
'answer': '内墙'
}
如果无法确定,请回答
{
'answer': '不确定'
}
'''
             },
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    # A short reply is taken at face value.
    if len(json_string.replace(" ", "")) < 10:
        if '外墙' in json_string:
            return False
        return True
    # A verbose reply is condensed by a simpler model into a plain 外墙/内墙 verdict.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“外墙”或者“内墙”的判断,请将该中文判断输出" + '''
如果是外墙,请回答
{
'answer': '外墙'
}
如果是内墙,请回答
{
'answer': '内墙'
}
如果无法确定,请回答
{
'answer': '不确定'
}
你只需要输出结果,不要输出分析过程
'''
             },
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": False},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    if '外墙' in json_string:
        return False
    return True
def aifilter4(A,  # options (candidate labels)
              B,  # data (work-content record)
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Apply the 界面剂 and 一般抹灰-vs-镶贴块料 special handling rules and
    return the surviving options."""
    options = []
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(A)):
        options.append("给定选项" + letters[i] + ",内容为" + A[i])
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        #model="THUDM/GLM-Z1-9B-0414",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求一:如果工作内容没有提及界面剂,则去掉所有含有“界面剂”字样的选项"},
            {"role": "user", "content": " 特殊处理要求二:如果选项中既有墙柱面一般抹灰的选项(夹板基层**不**属于一般抹灰,龙骨**不**属于一般抹灰,刷界面剂**不**属于一般抹灰),又有镶贴块料面层及幕墙的选项,则去掉墙柱面一般抹灰的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    #done_thinking = False
    #json_string=""
    #thinking_json_string=""
    #for chunk in completion:
    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
    #    answer_chunk = chunk.choices[0].delta.content
    #    if thinking_chunk != '':
    #        thinking_json_string = thinking_json_string + thinking_chunk
    #    elif answer_chunk != '':
    #        if not done_thinking:
    #            done_thinking = True
    #        json_string = json_string + answer_chunk
    json_string = completion.choices[0].message.content
    #print(completion.choices[0].message.reasoning_content)
    print(json_string)
    # If the reply is essentially just a bracketed letter list such as
    # "[A, C, D]", map the letters straight back to options.
    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
        answer = []
        for i, letter in enumerate("ABCDEFGHIJ"):
            if letter in json_string and len(A) > i:
                answer.append(A[i])
        return answer
    # Otherwise ask a simpler model to extract the surviving option letters first.
    completion = sfclient.chat.completions.create(
        #model="glm-4.5-flash",
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": False},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    answer = []
    for i, letter in enumerate("ABCDEFGHIJ"):
        if letter in json_string and len(A) > i:
            answer.append(A[i])
    return answer
def aifilter1(A,  # options (candidate labels)
              B,  # data (work-content record)
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Apply the 素水泥浆, 天棚工程 and 铝板幕墙 special handling rules and
    return the surviving options."""
    options = []
    letters = "ABCDEFGHIJKLMN"
    for i in range(len(A)):
        options.append("给定选项" + letters[i] + ",内容为" + A[i])
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        #model="THUDM/GLM-Z1-9B-0414",
        #model="ernie-speed-128k",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求一:去掉所有刷素水泥浆的选项"},
            {"role": "user", "content": " 特殊处理要求二:去掉所有精确含有“天棚工程”四个字的选项,不得删除含有“天棚及其他”五个字的选项"},
            {"role": "user", "content": " 特殊处理要求三:如果工作内容中没有提及铝板、铝单板,则删除含有“铝板幕墙”四个字的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": True},
        #stream=True
    )
    #done_thinking = False
    #json_string=""
    #thinking_json_string=""
    #for chunk in completion:
    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
    #    answer_chunk = chunk.choices[0].delta.content
    #    if thinking_chunk != '':
    #        thinking_json_string = thinking_json_string + thinking_chunk
    #    elif answer_chunk != '':
    #        if not done_thinking:
    #            done_thinking = True
    #        json_string = json_string + answer_chunk
    json_string = completion.choices[0].message.content
    #print(completion.choices[0].message.reasoning_content)
    print(json_string)
    # If the reply is essentially just a bracketed letter list such as
    # "[A, C, D]", map the letters straight back to options.
    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
        answer = []
        for i, letter in enumerate("ABCDEFGHIJ"):
            if letter in json_string and len(A) > i:
                answer.append(A[i])
        return answer
    # Otherwise ask a simpler model to extract the surviving option letters first.
    completion = sfclient.chat.completions.create(
        #model="glm-4.5-flash",
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
        #extra_body={"enable_thinking": False},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    answer = []
    for i, letter in enumerate("ABCDEFGHIJ"):
        if letter in json_string and len(A) > i:
            answer.append(A[i])
    return answer
def aifilter2(A,  # options (candidate labels)
              B,  # data (work-content record)
              aiclient,
              qwclient,
              dw):
    """Rule-based filter: when the options contain both a 'wumian' entry and a
    'loumian' entry (per the zhaoping_rule tables), drop the loumian entry."""
    hit_wumian = False
    for entry in A:
        if entry in obj['wumian']:
            hit_wumian = True
    hit_loumian = False
    loumian_entry = ''
    for entry in A:
        if entry in obj['loumian']:
            hit_loumian = True
            loumian_entry = entry
    if hit_wumian and hit_loumian:
        return [x for x in A if x != loumian_entry]
    return A
def postprocess0112(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Post-process the selected labels for the 0112 rule set."""
    # If a selected label is the second element of a known pair and the work
    # description mentions 混凝土墙、柱面免除抹灰, swap it for the pair's first element.
    hit = []
    hit_pair = []
    for entry in selected:
        for p in pair:
            if p[1] == entry:
                hit = [entry]
                hit_pair = p
    if len(hit) > 0:
        mian = aifilter6(selected, data, aiclient, qwclient, sfclient, name_dw)
        if mian:
            selected = [x for x in selected if not x == hit[0]]
            selected.append(hit_pair[0])
    # Labels that are never valid for this rule set.
    selected = [x for x in selected if '木地板' not in x]
    selected = [x for x in selected if '铁件安装' not in x]
    selected = [x for x in selected if '铁件制作' not in x]
    # 防水砂浆 or 防潮层 mentioned in the 特征 text: make sure the corresponding
    # chapter-10 label is present.
    if '防水砂浆' in data['tz'] or '防潮层' in data['tz']:
        if '第十章 屋面及防水工程 10.2 平面立面及其它防水 10.2.2 防水砂浆 (防水砂浆 1:2)防水砂浆 立面' not in selected:
            selected.append('第十章 屋面及防水工程 10.2 平面立面及其它防水 10.2.2 防水砂浆 (防水砂浆 1:2)防水砂浆 立面')
    # If the wall is judged interior (or undetermined), replace 外墙釉面砖 labels
    # with the generic wall-tile label.
    neiqiang = aifilter3(selected, data, aiclient, qwclient, sfclient, name_dw)
    if neiqiang and len([x for x in selected if '外墙釉面砖' in x]) > 0:
        selected = [x for x in selected if not '外墙釉面砖' in x]
        selected.append('第十四章 墙柱面工程 14.3 镶贴块料面层及幕墙 14.3.1 瓷砖 单块面积0.18m2以内墙砖 砂浆粘贴 墙面')
    # Apply the special handling rules.
    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
    prime = aifilter4(prime, data, aiclient, qwclient, sfclient, name_dw)
    # 钢骨架/钉在龙骨上 labels with unit m2: if none of the other options is a
    # 龙骨 option, add the 14-180 label, then drop the 钢骨架 labels.
    if len([x for x in prime if '钢骨架安装' in x or '钢骨架制作' in x or '钉在龙骨上' in x]) > 0:
        if data['dw'] == 'm2':
            t = [x for x in prime if '钢骨架安装' not in x and '钢骨架制作' not in x and '钉在龙骨上' not in x]
            longgu = aifilter5(t, data, aiclient, qwclient, sfclient, name_dw)
            if not longgu:
                prime = prime + [name_label['14-180']]
            prime = [x for x in prime if '钢骨架安装' not in x and '钢骨架制作' not in x]
    # If everything was filtered out, fall back to the generic selection.
    if len(prime) == 0:
        selected = fallback(candidates, data, aiclient, qwclient, sfclient, None, None)
        return selected
    # 界面剂 mentioned in the 特征 text but no 界面剂 label survived: add one.
    if '界面剂' in data['tz']:
        if len([x for x in prime if '界面剂' in x]) == 0:
            prime.append('第十四章 墙柱面工程 14.1 一般抹灰 14.1.3 保温砂浆及抗裂基层 刷界面剂 混凝土面')
    return prime
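
# --- Usage sketch -------------------------------------------------------------
# The filters only rely on `client.chat.completions.create`, i.e. an
# OpenAI-compatible client.  The driver below is a sketch: the base URLs, the
# environment-variable names and the contents of `data` are placeholders, not
# values defined by this project (the real `data` record comes from the caller).
if __name__ == '__main__':
    import os
    from openai import OpenAI

    aiclient = OpenAI(api_key=os.environ['AI_API_KEY'], base_url='https://example.com/v1')
    qwclient = OpenAI(api_key=os.environ['QW_API_KEY'], base_url='https://example.com/v1')
    sfclient = OpenAI(api_key=os.environ['SF_API_KEY'], base_url='https://example.com/v1')

    data = {'label': '示例标签', 'mc': '示例名称', 'tz': '示例特征', 'dw': 'm2'}
    candidates = list(name_label.values())
    result = postprocess0112(candidates[:5], data, aiclient, qwclient, sfclient,
                             None, None, candidates)
    print(result)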