xiaopzhang 2 месяцев назад
Родитель
Commit
fdf296e1e3
61 измененных файлов с 5503 добавлено и 552 удалено
  1. 4 4
      JD_DanWeiGJ.csv
  2. 5 1
      README
  3. 8 4
      Service.js
  4. 0 0
      basic_rule
  5. 2 0
      celery_app.py
  6. 4 0
      config.py
  7. 4 4
      de/tj/JD_DanWeiGJ.csv
  8. 3 3
      dedata.py
  9. 24 2
      dianceng.py
  10. 206 0
      editor.js
  11. 39 5
      extra.py
  12. 6 0
      f_youqi.py
  13. 6 7
      fallback.py
  14. 1 0
      fuzhu_candidate
  15. 45 0
      fuzhu_util.py
  16. 42 17
      huansuan.py
  17. 15 23
      huansuan0103.py
  18. 12 17
      huansuan0105.py
  19. 18 10
      huansuan0106.py
  20. 6 7
      huansuan0108.py
  21. 6 7
      huansuan0110.py
  22. 93 0
      huansuan0111.py
  23. 52 0
      huansuan0112.py
  24. 0 0
      incremental_rule
  25. 1 1
      jieheceng.py
  26. 63 0
      mianceng.py
  27. 69 0
      mianji.py
  28. 26 10
      postprocess.py
  29. 17 21
      postprocess0101.py
  30. 26 26
      postprocess0103.py
  31. 10 11
      postprocess0104.py
  32. 257 27
      postprocess0105.py
  33. 217 42
      postprocess0106.py
  34. 18 20
      postprocess0108.py
  35. 234 48
      postprocess0109.py
  36. 83 79
      postprocess0110.py
  37. 330 68
      postprocess0111.py
  38. 594 0
      postprocess011105.py
  39. 487 0
      postprocess0112.py
  40. 578 0
      postprocess0113.py
  41. 414 0
      postprocess0114.py
  42. 116 0
      postprocess0115.py
  43. 251 18
      postprocess0117.py
  44. 60 3
      server.js
  45. 2 0
      service.py
  46. 329 64
      tasks.py
  47. 58 0
      template.py
  48. 505 0
      tihuan.py
  49. 4 0
      tihuan_bancai.py
  50. 4 0
      tihuan_dizhuan.py
  51. 4 0
      tihuan_fangshui.py
  52. 4 0
      tihuan_gai.py
  53. 4 0
      tihuan_gangcai.py
  54. 4 0
      tihuan_gangjin.py
  55. 28 0
      tihuan_hunningtu.py
  56. 4 0
      tihuan_juancai.py
  57. 65 0
      tihuan_shajiang.py
  58. 4 0
      tihuan_shicai.py
  59. 4 0
      tihuan_wa.py
  60. 4 0
      tihuan_zhuan.py
  61. 24 3
      util.py

+ 4 - 4
JD_DanWeiGJ.csv

@@ -3616,7 +3616,7 @@
 2322,2323,14-212,玻璃粘贴在 砂浆砖面墙面,,,1237.1,4.0,164.9,42.23,,20.27,,,1406.0,1468.5,,,10m2,,,,,00000297,,,000001;000301;000554;002110,,10,,,,,清理基层、弹线、下料、安装玻璃面层、磨砂打边、清理净面。,,,0,0.0,,,,2323
 2323,2324,14-213,硬木板条墙面20mm厚,,,548.02,8.95,225.25,58.55,,28.1,,,782.22,868.87,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2324
 2324,2325,14-214,竹片墙面,,,326.39,0.0,164.9,41.23,,19.79,,,491.29,552.31,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2325
-2325,2326,14-215,石膏板墙面,,,151.32,0.0,103.7,25.93,,12.44,,,255.02,293.39,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2326
+2325,2326,14-215,石膏板墙面,,,151.32,0.0,103.7,25.93,,12.44,,,255.02,293.39,,,10m2,,,,,00000297,,NT14-补10,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2326
 2326,2327,14-216,超细玻璃棉,,,136.75,0.0,62.05,15.51,,7.45,,,198.8,221.76,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2327
 2327,2328,14-217,水泥压力板,,,281.82,0.0,87.55,21.89,,10.51,,,369.37,401.77,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2328
 2328,2329,14-218,塑料扣板,,,212.51,0.0,95.2,23.8,,11.42,,,307.71,342.93,,,10m2,,,,,00000297,,,000001,,10,,,,,"1.清理基层、定位下料、制作、铺钉面层、清理净面。
@@ -3815,9 +3815,9 @@
 2380,2381,15-42,胶合板面层安装在木龙骨上 平面,,,127.55,0.0,88.4,22.1,,10.61,,,215.95,248.66,,,10m2,,,,,00000309,,,000001;000311;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2381
 2381,2382,15-43,胶合板面层安装在木龙骨上 分缝,,,127.55,0.0,95.2,23.8,,11.42,,,222.75,257.97,,,10m2,,,,,00000309,,,000001;000311;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2382
 2382,2383,15-44,胶合板面层安装在木龙骨上 凹凸,,,135.15,0.0,105.4,26.35,,12.65,,,240.55,279.55,,,10m2,,,,,00000309,,,000001;000311;002122;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2383
-2383,2384,15-45,纸面石膏板天棚面层 安装在U型轻钢龙骨上 平面,,,142.35,0.0,95.2,23.8,,11.42,,,237.55,272.77,,,10m2,,,,,00000310,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2384
-2384,2385,15-46,纸面石膏板天棚面层 安装在U型轻钢龙骨上 凹凸,,,150.42,0.0,113.9,28.48,,13.67,,,264.32,306.47,,,10m2,,,,,00000310,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2385
-2385,2386,15-47,纸面石膏板天棚面层 搁放在T型铝合金龙骨上,,,126.0,0.0,44.2,11.05,,5.3,,,170.2,186.55,,,10m2,,,,,00000310,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2386
+2383,2384,15-45,纸面石膏板天棚面层 安装在U型轻钢龙骨上 平面,,,142.35,0.0,95.2,23.8,,11.42,,,237.55,272.77,,,10m2,,,,,00000310,,NT15-补1,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2384
+2384,2385,15-46,纸面石膏板天棚面层 安装在U型轻钢龙骨上 凹凸,,,150.42,0.0,113.9,28.48,,13.67,,,264.32,306.47,,,10m2,,,,,00000310,,NT15-补1,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2385
+2385,2386,15-47,纸面石膏板天棚面层 搁放在T型铝合金龙骨上,,,126.0,0.0,44.2,11.05,,5.3,,,170.2,186.55,,,10m2,,,,,00000310,,NT15-补1,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2386
 2386,2387,15-48,面层贴在夹板基层上 普通切片板 平面,,,263.2,0.0,153.0,38.25,,18.36,,,416.2,472.81,,,10m2,,,,,00000311,,,000001;0B0013;0B0014,,10,,,,,清理基层、粘贴、安装面板等全部操作过程。,,,0,0.0,,,,2387
 2387,2388,15-49,面层贴在夹板基层上 普通切片板 凹凸,,,272.2,0.0,182.75,45.69,,21.93,,,454.95,522.57,,,10m2,,,,,00000311,,,000001;0B0013;0B0014,,10,,,,,清理基层、粘贴、安装面板等全部操作过程。,,,0,0.0,,,,2388
 2388,2389,15-50,铝合金(嵌入式)方板天棚面层 平板,,,945.0,0.0,73.1,18.28,,8.77,,,1018.1,1045.15,,,10m2,,,,,00000312,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层、清理等全部操作过程。,,,0,0.0,,,,2389

+ 5 - 1
README

@@ -4,6 +4,9 @@ HUNYUAN sk-Sshjopx8L4M035UbwEs3Wb8OjhwI6HgkvJHjbSRWqdpzyzth
 BAIDU bce-v3/ALTAK-K98CKmtc7tJlL9YI0CUPb/48d71a9d697244aac26c76b626bfd6cba2178800
 SF sk-qurpsudpuorcunpfandrxapbyqaauthpxdhrdhsxdnklctgo
 wuwen sk-oxwdksnnrfkkbgcq
+SS sk-31nOThGPzpzqscQTI4qLX0pOMghhbYw6d9c4j9uot2C2rsEf
+cerebras csk-rh24venxkvmynyfkrdyfefvwdxp9x94kkw3ex53yy8j2hj5h
+groq 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
 对于回填土,定额套用的套路是这样的
 首先。要考虑土的来源。
 一种情况是,不需要运土
@@ -16,4 +19,5 @@ wuwen sk-oxwdksnnrfkkbgcq
 比如人工20%机械80%
 当然,你可以选择全是人工。考虑到机械施展不开。
 但是一般情况下,基坑回填是不能全是机械没有人工的
-
+金属面油漆,一般是防锈底漆加面漆,除非另行指定
+抹灰面油漆,先刮腻子,再涂油漆

+ 8 - 4
Service.js

@@ -1153,6 +1153,7 @@ class Service{
                         let selected = de['fuzhu'][i];//selected is 结构化的处理信息
                         let target = selected[0];
                         if (match_target(de['rcjdg'][j][1], de['rcjdg'][j][2], target, selected[1])) {
+				console.log('match fuzhu')
                             if (selected[2] == '系数' || selected[2] == '商品砼系数' || selected[2] == '除此机械外') {
                                 origin = origin * (Number(selected[3]) ** Number(selected[5]));
 
@@ -1511,7 +1512,8 @@ class Service{
         let clf_sum = 0;
         let zcf_sum = 0;
         for(let i = 1; i < data.length; i++) {
-            if (data[i][1].includes('000FE') && data[i][4] == '%')continue;//azfy
+            console.log(data[i][1]);
+	    if (data[i][1].includes('000FE') && data[i][4] == '%')continue;//azfy
             if (data[i][1]=='00EXP001' && data[i][4] == '%'){//回程费占人工费
                 continue;    
             }
@@ -1672,7 +1674,7 @@ class Service{
 
     }
 
-
+    //xuhao=['1*1','3*1']
     updateBeizhu(row, selected, xuhao) {//xuhao is for rename, selected is 结构化的处理信息
         let qd = this.cache.filter(x=>x["_children"].filter(y=>y['key']==row).length > 0)[0];
         if (!qd) return [null, null];
@@ -1752,7 +1754,7 @@ class Service{
         let de = qd["_children"].filter(x=>x['key'] == row)[0];
         let origin = de['数量']
         if (Number(origin) - Number(value) < 0.0001 && Number(origin) - Number(value) > -0.0001) {
-            return [false, null];
+            return [false, copy(this.cache)];
         }
         console.log("shuliang update");
         de['数量'] = value;
@@ -2366,7 +2368,9 @@ class Service{
         return input;
     }
 
-    
+    current(){
+      return copy(this.cache)
+    }   
 }
 
 

Разница между файлами не показана из-за своего большого размера
+ 0 - 0
basic_rule


+ 2 - 0
celery_app.py

@@ -4,4 +4,6 @@ celery_app = Celery(
 broker="redis://:Pheecian1@47.101.198.30:6379/0",
 backend="redis://:Pheecian1@47.101.198.30:6379/1",
 )
+celery_app.conf.update(redis_socket_timeout=30)
+celery_app.conf.update(redis_retry_on_timeout=True)
 celery_app.conf.update(task_track_started=True)

+ 4 - 0
config.py

@@ -0,0 +1,4 @@
+def simplemodel():
+    return "internlm3-latest"
+
+

+ 4 - 4
de/tj/JD_DanWeiGJ.csv

@@ -3616,7 +3616,7 @@
 2322,2323,14-212,玻璃粘贴在 砂浆砖面墙面,,,1237.1,4.0,164.9,42.23,,20.27,,,1406.0,1468.5,,,10m2,,,,,00000297,,,000001;000301;000554;002110,,10,,,,,清理基层、弹线、下料、安装玻璃面层、磨砂打边、清理净面。,,,0,0.0,,,,2323
 2323,2324,14-213,硬木板条墙面20mm厚,,,548.02,8.95,225.25,58.55,,28.1,,,782.22,868.87,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2324
 2324,2325,14-214,竹片墙面,,,326.39,0.0,164.9,41.23,,19.79,,,491.29,552.31,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2325
-2325,2326,14-215,石膏板墙面,,,151.32,0.0,103.7,25.93,,12.44,,,255.02,293.39,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2326
+2325,2326,14-215,石膏板墙面,,,151.32,0.0,103.7,25.93,,12.44,,,255.02,293.39,,,10m2,,,,,00000297,,NT14-补10,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2326
 2326,2327,14-216,超细玻璃棉,,,136.75,0.0,62.05,15.51,,7.45,,,198.8,221.76,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2327
 2327,2328,14-217,水泥压力板,,,281.82,0.0,87.55,21.89,,10.51,,,369.37,401.77,,,10m2,,,,,00000297,,,000001,,10,,,,,清理基层、定位下料、制作、铺钉面层、清理净面。,,,0,0.0,,,,2328
 2328,2329,14-218,塑料扣板,,,212.51,0.0,95.2,23.8,,11.42,,,307.71,342.93,,,10m2,,,,,00000297,,,000001,,10,,,,,"1.清理基层、定位下料、制作、铺钉面层、清理净面。
@@ -3815,9 +3815,9 @@
 2380,2381,15-42,胶合板面层安装在木龙骨上 平面,,,127.55,0.0,88.4,22.1,,10.61,,,215.95,248.66,,,10m2,,,,,00000309,,,000001;000311;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2381
 2381,2382,15-43,胶合板面层安装在木龙骨上 分缝,,,127.55,0.0,95.2,23.8,,11.42,,,222.75,257.97,,,10m2,,,,,00000309,,,000001;000311;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2382
 2382,2383,15-44,胶合板面层安装在木龙骨上 凹凸,,,135.15,0.0,105.4,26.35,,12.65,,,240.55,279.55,,,10m2,,,,,00000309,,,000001;000311;002122;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2383
-2383,2384,15-45,纸面石膏板天棚面层 安装在U型轻钢龙骨上 平面,,,142.35,0.0,95.2,23.8,,11.42,,,237.55,272.77,,,10m2,,,,,00000310,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2384
-2384,2385,15-46,纸面石膏板天棚面层 安装在U型轻钢龙骨上 凹凸,,,150.42,0.0,113.9,28.48,,13.67,,,264.32,306.47,,,10m2,,,,,00000310,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2385
-2385,2386,15-47,纸面石膏板天棚面层 搁放在T型铝合金龙骨上,,,126.0,0.0,44.2,11.05,,5.3,,,170.2,186.55,,,10m2,,,,,00000310,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2386
+2383,2384,15-45,纸面石膏板天棚面层 安装在U型轻钢龙骨上 平面,,,142.35,0.0,95.2,23.8,,11.42,,,237.55,272.77,,,10m2,,,,,00000310,,NT15-补1,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2384
+2384,2385,15-46,纸面石膏板天棚面层 安装在U型轻钢龙骨上 凹凸,,,150.42,0.0,113.9,28.48,,13.67,,,264.32,306.47,,,10m2,,,,,00000310,,NT15-补1,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2385
+2385,2386,15-47,纸面石膏板天棚面层 搁放在T型铝合金龙骨上,,,126.0,0.0,44.2,11.05,,5.3,,,170.2,186.55,,,10m2,,,,,00000310,,NT15-补1,000001;0B0013;0B0014,,10,,,,,安装天棚面层,清理表面等全面操作过程。,,,0,0.0,,,,2386
 2386,2387,15-48,面层贴在夹板基层上 普通切片板 平面,,,263.2,0.0,153.0,38.25,,18.36,,,416.2,472.81,,,10m2,,,,,00000311,,,000001;0B0013;0B0014,,10,,,,,清理基层、粘贴、安装面板等全部操作过程。,,,0,0.0,,,,2387
 2387,2388,15-49,面层贴在夹板基层上 普通切片板 凹凸,,,272.2,0.0,182.75,45.69,,21.93,,,454.95,522.57,,,10m2,,,,,00000311,,,000001;0B0013;0B0014,,10,,,,,清理基层、粘贴、安装面板等全部操作过程。,,,0,0.0,,,,2388
 2388,2389,15-50,铝合金(嵌入式)方板天棚面层 平板,,,945.0,0.0,73.1,18.28,,8.77,,,1018.1,1045.15,,,10m2,,,,,00000312,,,000001;0B0013;0B0014,,10,,,,,安装天棚面层、清理等全部操作过程。,,,0,0.0,,,,2389

+ 3 - 3
dedata.py

@@ -134,9 +134,9 @@ def read_singledexilie2(zhuanye: int, debh: str):
         print("解析算式,系数为" + str(coef))
         result1_, result2_, rgde_, jxde_, clde_, bz_selected_, bz_selected2_, actual_zhuanye_ = service.getSingleDeXilie(zhuanye, debh)
         if result1_:
-            util.mergerg(rgde, rgde_, coef)
-            util.mergejx(jxde, jxde_, coef)
-            util.mergecl(clde, clde_, coef)
+            rgde = util.mergerg(rgde, rgde_, coef)
+            jxde = util.mergejx(jxde, jxde_, coef)
+            clde = util.mergecl(clde, clde_, coef)
             result3["rgde"] = rgde
             result3["jxde"] = jxde
             result3["clde"] = clde

+ 24 - 2
dianceng.py

@@ -26,6 +26,26 @@ answer2="""
 5、素土夯实,向外坡3%-5%
 6、与墙交接处聚乙烯泡沫塑料及密封膏嵌缝
 """
+example3='''
+给定的文字如下:
+
+1、用于标高46.25m处平屋面及坡屋面下平屋面 用于标高50.25m处平屋面 做法详见图纸设计及相关图集规范
+2、50厚C30细石混凝土,内配φ4@100双向钢筋网片,面层压光,设4mx4m分格缝(钢筋断开),缝宽15,内填嵌膏缝油
+3、10厚1:3石灰砂浆隔离层
+
+经过分析,这段文字描述的是屋面构造做法,包括混凝土面层和隔离层。其中没有提到垫层相关的施工内容(如砂石层、回填土等起支撑或排水作用的基层材料)。
+
+因此,该段文字中**没有垫层的施工工序**,原封不动返回如下:
+
+1、用于标高46.25m处平屋面及坡屋面下平屋面 用于标高50.25m处平屋面 做法详见图纸设计及相关图集规范
+2、50厚C30细石混凝土,内配φ4@100双向钢筋网片,面层压光,设4mx4m分格缝(钢筋断开),缝宽15,内填嵌膏缝油
+3、10厚1:3石灰砂浆隔离层
+'''
+answer3='''
+1、用于标高46.25m处平屋面及坡屋面下平屋面 用于标高50.25m处平屋面 做法详见图纸设计及相关图集规范
+2、50厚C30细石混凝土,内配φ4@100双向钢筋网片,面层压光,设4mx4m分格缝(钢筋断开),缝宽15,内填嵌膏缝油
+3、10厚1:3石灰砂浆隔离层
+'''
 def dianceng(A, aiclient, qwclient):
     completion = qwclient.chat.completions.create(
         model="Qwen/Qwen3-32B",
@@ -41,11 +61,13 @@ def dianceng(A, aiclient, qwclient):
     print(json_string)
     completion = aiclient.chat.completions.create(
         model="glm-4.5-flash",
+        extra_body={"thinking": {"type": "disabled"}},
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "给定一段文字: " + json_string +   "\n 请抽取出文字中的结果"},
+            {"role": "user", "content": "给定一段文字:   请抽取出文字中的返回结果"},
+            {"role": "user", "content": "例如,给定一段文字: " + example3 +   "\n 请返回:\n"+answer3},
+            {"role": "user", "content": "现在,给定一段文字: " + json_string +   "\n 请参照例子,返回抽取出的结果,请直接返回结果"},
         ],
-        extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content

+ 206 - 0
editor.js

@@ -0,0 +1,206 @@
+
+
+function copy(input) {
+    return JSON.parse(JSON.stringify(input));
+}
+
+
+export const undo = () => {
+    let newData = Service.undo();
+    return newData;
+};
+
+export const undo_djcs = () => {
+    let newData = Service.undo_djcs();
+    return newData;
+};
+
+
+
+export const redo = () => {
+    let newData = Service.redo();
+    return newData;
+};
+
+export const redo_djcs = () => {
+    let newData = Service.redo_djcs();
+    return newData;
+};
+
+export const shanchu = ( selectedRow) => {
+   
+    
+    
+    let newData = Service.shanchu(selectedRow);
+    
+    return newData;
+  };
+
+  export const shanchu_djcs = ( selectedRow) => {
+   
+    
+    
+    let newData = Service.shanchu_djcs(selectedRow);
+    
+    return newData;
+  };
+
+
+////////////////////////////////QINGDAN//////////////////////////////
+
+
+
+
+export const danxiangdinge = (selectedRow) => {
+    return Service.danxiangdinge(selectedRow);
+};
+
+
+export const updateDercj = (row, data) => {
+    return Service.updateDercj(row, data);
+
+};
+
+
+export const huan = (replaceState, row) => {
+   
+    return Service.huan(replaceState['old'],replaceState['newBianhao'],replaceState['newName'], replaceState['newJia'], replaceState['zhuanye'],row);
+
+};
+
+export const updateShuliang = (data, row) => {
+    return Service.updateShuliang(data, row);
+
+};
+
+export const updateDeMingcheng = (data, row) => {
+    return Service.updateDeMingcheng(data, row);
+
+};
+
+
+
+export const changguidinge = (dingeclick, selectedRow) => {
+    return Service.changguidinge(dingeclick, selectedRow);
+
+};
+
+
+export const handleYuban = (derow, select ) => {
+    return Service.handleYuban(derow, select);
+
+};
+
+export const handleRcjbc = (derow, rcjbc ) => {
+    return Service.handleRcjbc(derow, rcjbc);
+
+};
+
+export const handleBeizhu = (beizhuFK/**辅库json */, derow, fuzhuSelect/*被选中的序号*/, fuzhu/*页面展示的附注文本*/  ) => {
+    console.log(beizhuFK);
+    let bianma = [];
+    let xuhao = [];
+    let fuzhuSelect_ = Array.from(fuzhuSelect);
+    let keys = beizhuFK['BZBH'];
+    for(let j = 0; j < fuzhuSelect_.length; j++) {
+        let entry = fuzhuSelect_[j];
+        for(let i = 0; i < fuzhu.length; i++) {
+            if (fuzhu[i]['key'] == entry) {
+                bianma.push(fuzhu[i]['编号'].toString().concat('*').concat(fuzhu[i]['数量'].toString()));
+                xuhao.push(fuzhu[i]['序号'].toString().concat("*").concat(fuzhu[i]['数量'].toString()));
+            }
+        }
+    }
+    let result = [];
+    for(let i = 0; i < bianma.length; i++) {
+        let bh_ = bianma[i];
+        let bh = bh_.split("*")[0];
+        for(let j = 0; j < Object.keys(keys).length; j++) {
+            let BZBH_ = Object.keys(keys)[j];
+            let BZBH = keys[BZBH_];
+            if (BZBH == bh) {
+                result.push([beizhuFK['BH'][BZBH_], beizhuFK['MC'][BZBH_], beizhuFK['LB'][BZBH_], beizhuFK['SL'][BZBH_], beizhuFK['XBH'][BZBH_], Number(bh_.split('*')[1])]);
+            }
+        }
+    }
+    console.log(result);
+    //return Service.updateBeizhu(derow, result, xuhao);
+    return result
+};
+
+export const handleAI = (result ) => {
+    return Service.handleAI(result);
+
+};
+export const handleAI_djcs = (result ) => {
+    return Service.handleAI_djcs(result);
+
+};
+///////////////////////////////////////////DJCS///////////////////////////////////////////
+export const handleYuban_djcs = (derow, select ) => {
+    return Service.handleYuban_djcs(derow, select);
+
+};
+export const handleBeizhu_djcs = (beizhuFK, derow, fuzhuSelect, fuzhu ) => {
+    console.log(beizhuFK);
+    let bianma = [];
+    let xuhao = [];
+    let fuzhuSelect_ = Array.from(fuzhuSelect);
+    let keys = beizhuFK['BZBH'];
+    for(let j = 0; j < fuzhuSelect_.length; j++) {
+        let entry = fuzhuSelect_[j];
+        for(let i = 0; i < fuzhu.length; i++) {
+            if (fuzhu[i]['key'] == entry) {
+                bianma.push(fuzhu[i]['编号'].toString().concat('*').concat(fuzhu[i]['数量'].toString()));
+                xuhao.push(fuzhu[i]['序号'].toString().concat("*").concat(fuzhu[i]['数量'].toString()));
+            }
+        }
+    }
+    let result = [];
+    for(let i = 0; i < bianma.length; i++) {
+        let bh_ = bianma[i];
+        let bh = bh_.split("*")[0];
+        for(let j = 0; j < Object.keys(keys).length; j++) {
+            let BZBH_ = Object.keys(keys)[j];
+            let BZBH = keys[BZBH_];
+            if (BZBH == bh) {
+                result.push([beizhuFK['BH'][BZBH_], beizhuFK['MC'][BZBH_], beizhuFK['LB'][BZBH_], beizhuFK['SL'][BZBH_], beizhuFK['XBH'][BZBH_], Number(bh_.split('*')[1])]);
+            }
+        }
+    }
+    console.log(result);
+    return Service.updateBeizhu_djcs(derow, result, xuhao);
+};
+
+
+export const danxiangdinge_djcs = (selectedRow) => {
+    return Service.danxiangdinge_djcs(selectedRow);
+};
+
+export const updateDercj_djcs = (row, data) => {
+    return Service.updateDercj_djcs(row, data);
+
+};
+
+export const updateShuliang_djcs = (data, row) => {
+    return Service.updateShuliang_djcs(data, row);
+
+};
+
+export const changguidinge_djcs = (dingeclick, selectedRow) => {
+    return Service.changguidinge_djcs(dingeclick, selectedRow);
+
+};
+
+export const azfy_djcs_eligible = (selectedRow) => {
+    return Service.azfy_djcs_eligible(selectedRow);
+
+};
+
+export const azfy_djcs = (selectedRow, dinge, selected) => {
+    return Service.azfy_djcs(selectedRow, dinge, selected);
+
+};
+
+
+

+ 39 - 5
extra.py

@@ -1,11 +1,17 @@
 import time
+from config import simplemodel
 from menchuangfallback import menchuangfallback
 def extra(
        data, #data
        aiclient,
        qwclient,
+       sfclient,
        menchuang_collection,
-       model):
+       model,
+       qita_collection,
+       ):
+    if '高强螺栓' in data['mc']:
+        return '高强螺栓'
     if data['bianma'].startswith("0108"):
         sentence=["特征描述:" + data['mc'] + "\n" + data['tz']]
         embeddings = model.encode(sentence)
@@ -28,8 +34,30 @@ def extra(
         answers = [x for x in answers if ':' in x ]
         answer2 = answers[0].split(":")[1].replace(" ", "")
         return answer2
+    if data['bianma'].startswith("0115"):
+        sentence=["特征描述:" + data['mc'] + "\n" + data['tz']]
+        embeddings = model.encode(sentence)
+        result = qita_collection.query(query_embeddings=embeddings, n_results=10)
+        print(result['documents'][0])
+        l = len([x for x in result['distances'][0] if x < 0.5])
+        if l < 2:
+            l = 2
+        completion = aiclient.chat.completions.create(
+            model="glm-4.5-flash",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+                {"role": "user", "content": "特征描述往往比较具体,工作内容是对特征描述的主要关键的总结提炼。以下是一些特征描述以及对应的提炼的工作内容的例子。" + '\n\n'.join(result['documents'][0][:l]) + "给定一段特征描述,内容为" + data['mc'] +data['tz'] + "。请参照示例,给出提炼的工作内容(提炼的工作内容中不得出现类似详见图纸、图集的表述). 注意,不需要输出特征描述,仅输出工作内容"},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+        json_string = completion.choices[0].message.content
+        print(json_string)
+        answers = json_string.split("\n")
+        answers = [x for x in answers if ':' in x ]
+        answer2 = answers[0].split(":")[1].replace(" ", "")
+        return answer2
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 背景知识:已知预应力高强混凝土管桩(PHC)代号定义为PHC-AAA(BB)CC-DDD-E1,E2,E3,E4,其中AAA代表管桩外径,BB代表管桩壁厚,CC表示型号,DDD表示混凝土强度等级,E1/E2/E3/E4表示分段桩长。例如,PHC-500(125)-AB-C80-9,7 表示外径500mm,壁厚125mm,型号AB,混凝土强度C80, 分段桩长分别为9米和7米,总桩长16米,施工时需要将两个分段接桩"},
@@ -62,18 +90,24 @@ def need_extra(
        data, #data
        aiclient,
        qwclient,
+       sfclient,
        result):
     if data['bianma'].startswith("0108") and len(result) == 0:
         return True
+    if data['bianma'].startswith("0115") and len(result) == 0:
+        return True
+    if '高强螺栓' in data['mc']:
+        return True
     time.sleep(1)
     completion = qwclient.chat.completions.create(
-        model="ZhipuAI/GLM-4.6",
-        #model="glm-4.5-flash",
+        model="Qwen/Qwen3-32B",
+        #model="THUDM/GLM-4-9B-0414",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "问题描述:  给定一段工作内容描述,内容为" + data['mc'] +data['tz'] + "。请判断内容是否属于打桩、压桩。请回答是或者否"},
         ],
-        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"thinking": {"type": "disabled"}},
+        extra_body={"enable_thinking":  False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)

+ 6 - 0
f_youqi.py

@@ -0,0 +1,6 @@
+def f_youqi(data,youqi,collection,model,ce,name_label):
+    if '环氧富锌' in youqi:
+        return [name_label['17-158'],name_label['17-159'],name_label['17-159']]
+    if '氟碳漆' in youqi:
+        return [name_label['17-157']]
+    return [name_label['17-135'],name_label['17-132']]

+ 6 - 7
fallback.py

@@ -1,5 +1,7 @@
 from menchuangfallback import menchuangfallback
-def fallback(A, B, aiclient, qwclient, menchuang_collection, model):
+from template import xuanxiang
+from config import simplemodel
+def fallback(A, B, aiclient, qwclient, sfclient, menchuang_collection, model):
     if B['bianma'].startswith("0109"):
         if B['bianma'].startswith("010902") and '非固化' in B['tz'] and '沥青防水涂料' in B['tz']:##屋面防水
             return ['第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.1  涂刷油类 水泥基渗透结晶 防水材料 二~三遍(厚2mm)'] ##需要换
@@ -37,12 +39,9 @@ def fallback(A, B, aiclient, qwclient, menchuang_collection, model):
     json_string = completion.choices[0].message.content
     print(json_string)
     
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content

+ 1 - 0
fuzhu_candidate

@@ -0,0 +1 @@
+["1-143", "1-147", "1-198", "1-2", "1-200", "1-201", "1-204", "1-205", "1-214", "1-215", "1-216", "1-217", "1-222", "1-223", "1-224", "1-225", "1-262", "1-263", "1-264", "1-265", "1-266", "1-273", "1-274", "13-149", "13-152", "13-153", "13-164", "13-18", "13-19", "13-7", "13-9", "14-10", "14-26", "15-45", "15-5", "16-198", "16-50", "17-132", "17-133", "17-136", "17-142", "17-143", "17-157", "17-168", "17-172", "17-176", "17-177", "17-181", "2-11", "2-14", "2-32", "21-103", "21-12", "21-141", "21-2", "21-27", "21-29", "21-32", "21-34", "21-36", "21-42", "21-44", "21-46", "21-48", "21-50", "21-52", "21-57", "21-59", "21-61", "21-63", "21-67", "21-69", "21-74", "21-76", "21-78", "21-8", "21-82", "21-87", "21-89", "21-90", "21-92", "21-94", "21-96", "3-13", "3-14", "3-15", "3-16", "3-18", "3-19", "3-20", "3-21", "3-22", "3-23", "3-24", "3-25", "3-27", "3-92", "3-93", "3-94", "4-10", "4-101", "4-11", "4-7", "4-8", "4-9", "5-1", "5-2", "5-3", "5-4", "6-190", "6-191", "6-192", "6-194", "6-195", "6-196", "6-197", "6-200", "6-201", "6-202", "6-204", "6-207", "6-209", "6-211", "6-213", "6-215", "6-216", "6-217", "6-218", "6-219", "6-221", "6-222", "6-226", "6-227", "7-35", "7-62", "7-63", "8-112", "8-113", "8-115", "8-116", "8-130", "8-132", "8-134", "8-137", "8-138", "8-139", "8-140", "8-141", "8-143", "8-149", "8-93", "NT17-\u88655", "NT3-\u88659"]

+ 45 - 0
fuzhu_util.py

@@ -0,0 +1,45 @@
+from config import simplemodel
+def fuzhu_util(
+       label,
+       options,
+       work,
+       aiclient,
+       qwclient,
+       sfclient
+       ):
+    xuanxiang=[]
+    choice=[]
+    letters='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+    for i in range(len(options)):
+        xuanxiang.append("给定修正、补充选项:" + options[i] + ', 记作' + letters[i])
+    completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述:" + work + '\n'},
+                {"role": "user", "content": "该工作内容的描述中包含了一道工序如下:" + label + '\n'},
+                {"role": "user", "content": "该工序与工作内容可能有稍许偏差,需要修正或补充。 " + '; '.join(xuanxiang) + "\n请从上述选项中选择出符合工作内容描述,确实可以修正、补充的选项并返回。例如,选项中如果有除锈等级的修正,则你需要分析工作内容描述中是否明确除锈等级。如果明确除锈等级,你应该选择对应选项。如果没有,则不得选择。再比如,选项中提及原材料每米重量5kg以内为小型构件。那么你要分析工作内容中是否给出原材料每米重量,如果给出,则你可以据此做出选择,如果没有给出,你不得选择该选项。再比如,选项中提及,(如果)设计采用螺栓的,如何如何,那么你要分析工作内容,如果工作内容明确描述设计使用螺栓的,你可以据此做出选择,如果没有,则你不得选择。请给出分析并返回类似[A, B]这样的数组。如果没有任何选项符合,则返回空数组[]"},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string) < 8:
+        for i in range(len(options)):
+            if letters[i] in json_string:
+                choice.append(letters[i])
+    else:
+        completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个 数组作为答案,数组可能是个空数组,也可能包含了类似A、B、C的字母,请将该答案输出。请直接输出答案,不用输出过程"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+        json_string = completion.choices[0].message.content
+        for i in range(len(options)):
+            if letters[i] in json_string:
+                choice.append(letters[i])
+
+    return choice

+ 42 - 17
huansuan.py

@@ -8,20 +8,44 @@ from huansuan0106 import callzaihuansuan0106_2
 from huansuan0106 import callzaihuansuan0106_3
 from huansuan0108 import callzaihuansuan0108_1
 from huansuan0110 import callzaihuansuan0110_1
-
-def callzaihuansuan(bianma, label, A,B,C, aiclient, qwclient):
+from huansuan0111 import callzaihuansuan0111_1
+from huansuan0111 import callzaihuansuan0111_2
+from huansuan0111 import callzaihuansuan0111_3
+from huansuan0112 import callzaihuansuan0112_1
+import time
+def callzaihuansuan(bianma, label, A,B,C, aiclient, qwclient, sfclient):
+    time.sleep(1)
     if bianma.startswith("0103"):
         if '送桩 桩长' in label or '送桩  桩长' in label:
-            return callzaihuansuan0103_1(bianma, A, B, C, aiclient)
+            return callzaihuansuan0103_1(bianma, A, B, C, aiclient, sfclient)
         elif '电焊接桩' in label:
-            return callzaihuansuan0103_3(bianma, A, B, C, aiclient, qwclient)
+            return callzaihuansuan0103_3(bianma, A, B, C, aiclient, qwclient, sfclient)
         else:
-            return callzaihuansuan0103_2(bianma, A, B, C, aiclient, qwclient)
+            return callzaihuansuan0103_2(bianma, A, B, C, aiclient, qwclient, sfclient)
+    if bianma.startswith("0109"):
+        print(label)
+        print(A)
+        if '带肋钢筋' in label and 'm2' in A:##
+            return callzaihuansuan0111_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
     if bianma.startswith("0110"):
         print(label)
         print(A)
         if '找坡' in label and 'm2' in A:##混凝土找坡的单位是m3
-            return callzaihuansuan0110_1(bianma, label, A, B, C, aiclient, qwclient)
+            return callzaihuansuan0110_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+    if bianma.startswith("0111"):
+        print(label)
+        print(A)
+        if '带肋钢筋' in label and 'm2' in A:##
+            return callzaihuansuan0111_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+        elif '垫层' in label and 'm2' in A:##垫层的单位是m3
+            return callzaihuansuan0111_2(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+        elif '踢脚线' in label and 'm2' in A:##踢脚线的单位是10m
+            return callzaihuansuan0111_3(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+    if bianma.startswith("0112"):
+        print(label)
+        print(A)
+        if '隔断' in label and 'm2' in A:##可能单位是10间
+            return callzaihuansuan0112_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
     if bianma.startswith("0105"):
         print(label)
         print(A)
@@ -32,9 +56,9 @@ def callzaihuansuan(bianma, label, A,B,C, aiclient, qwclient):
             }    
             """
         elif '垫层' in label and 'm2' in A:##垫层的单位是m3
-            return callzaihuansuan0105_1(bianma, label, A, B, C, aiclient, qwclient)
+            return callzaihuansuan0105_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
         elif '成品不锈钢盖板安装' in label and '套' in A:##垫层的单位是m3
-            return callzaihuansuan0105_2(bianma, label, A, B, C, aiclient, qwclient)
+            return callzaihuansuan0105_2(bianma, label, A, B, C, aiclient, qwclient, sfclient)
         elif '油膏' in label and 'm2' in A and '散水' in C:##伸缩缝的单位是10m
             return """
                  {
@@ -44,15 +68,16 @@ def callzaihuansuan(bianma, label, A,B,C, aiclient, qwclient):
     if bianma.startswith("0106"):
         print(A)
         print(B)
-        if 't' in A and 'm2' in B and '10m2' not in B and '100m2' not in B:
-            return callzaihuansuan0106_1(bianma, label, A, B, C, aiclient, qwclient)
-        if 't' in A and '10m2' in B:
-            return callzaihuansuan0106_2(bianma, label, A, B, C, aiclient, qwclient)
-        if 't' in A and '100m2' in B:
-            return callzaihuansuan0106_3(bianma, label, A, B, C, aiclient, qwclient)
-    if bianma.startswith("0108"):
-        if '樘' in A and 'm2' in B:
-            return callzaihuansuan0108_1(bianma, label, A, B, C, aiclient, qwclient)
+        if '油漆' in label:
+            if 't' in A and 'm2' in B and '10m2' not in B and '100m2' not in B:
+                return callzaihuansuan0106_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+            if 't' in A and '10m2' in B:
+                return callzaihuansuan0106_2(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+            if 't' in A and '100m2' in B:
+                return callzaihuansuan0106_3(bianma, label, A, B, C, aiclient, qwclient, sfclient)
+        if bianma.startswith("0108"):
+            if '樘' in A and 'm2' in B:
+                return callzaihuansuan0108_1(bianma, label, A, B, C, aiclient, qwclient, sfclient)
 
     return """
        {"answer": "0"}

+ 15 - 23
huansuan0103.py

@@ -1,5 +1,6 @@
-
-def callzaihuansuan0103_1(bianma, A,B,C, aiclient):
+from template import expression
+from config import simplemodel
+def callzaihuansuan0103_1(bianma, A,B,C, aiclient, sfclient):
     completion = aiclient.chat.completions.create(
         model="glm-4.5-flash",
         messages=[
@@ -12,25 +13,22 @@ def callzaihuansuan0103_1(bianma, A,B,C, aiclient):
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
     return json_string
 
-def callzaihuansuan0103_2(bianma, A,B,C, aiclient, qwclient):
+def callzaihuansuan0103_2(bianma, A,B,C, aiclient, qwclient, sfclient):
     print("0103_2")
     print(A)
     print(B)
     print(C)
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         #model="qwen3-14b",
         messages=[
@@ -44,19 +42,16 @@ def callzaihuansuan0103_2(bianma, A,B,C, aiclient, qwclient):
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,表达式中不含有圆周率π, 请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
     return json_string
 
-def callzaihuansuan0103_3(bianma, A,B,C, aiclient, qwclient):
+def callzaihuansuan0103_3(bianma, A,B,C, aiclient, qwclient, sfclient):
     print("0103_3")
     print(A)
     print(B)
@@ -76,12 +71,9 @@ def callzaihuansuan0103_3(bianma, A,B,C, aiclient, qwclient):
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content

+ 12 - 17
huansuan0105.py

@@ -1,7 +1,8 @@
-
-def callzaihuansuan0105_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
+from template import expression
+from config import simplemodel
+def callzaihuansuan0105_1(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 背景知识:厚度可以用单位米(m)来衡量,可以用单位厘米(cm)来衡量,可以用单位毫米(mm)来衡量;如果没有明确单位,默认指毫米"},
@@ -11,21 +12,18 @@ def callzaihuansuan0105_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
     return json_string
 
-def callzaihuansuan0105_2(bianma, label, A,B,C, aiclient, qwclient):##C->tz
+def callzaihuansuan0105_2(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 背景知识:长度可以用单位米(m)来衡量,可以用单位厘米(cm)来衡量,可以用单位毫米(mm)来衡量;如果没有明确单位,默认指毫米"},
@@ -35,12 +33,9 @@ def callzaihuansuan0105_2(bianma, label, A,B,C, aiclient, qwclient):##C->tz
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content

+ 18 - 10
huansuan0106.py

@@ -1,4 +1,5 @@
 import re
+from config import simplemodel
 with open('gangjiegouhuansuan','r') as f:
     content = f.read()
 import json
@@ -7,9 +8,9 @@ options=[]
 for i in range(len(obj['mc'])):
     options.append('给定选项A'+str(i)+', 内容为'+obj['mc'][i])
 options = ','.join(options)
-def callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient):##C->tz
+def callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": "问题描述:  给定一段工作内容描述,内容为" + C + "," + options +  ",请选出最匹配工作内容的选项并输出。例如如果你觉得A11选项最匹配,请输出A11"},
@@ -20,8 +21,8 @@ def callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient):##C->tz
     print(json_string)
     if len(json_string) < 4:
         return json_string
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A10的表达式作为结果,请将该最终结果输出"},                       
@@ -32,28 +33,35 @@ def callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient):##C->tz
     print(json_string)
     lines = json_string.split('\n')
     lines = [x for x in lines if ':' in x]
+    lines = [x for x in lines if 'A' in x]
     line = lines[0].split(':')[1]
     line = line.replace('\'', '')
     line = line.replace('\"', '')
     matched_letters = re.findall(r'[a-zA-Z0-9]', line)
     return ''.join(matched_letters)
     
-def callzaihuansuan0106_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
-    t =  callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient)
+def callzaihuansuan0106_1(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    t =  callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient, sfclient)
+    print(t)
     t=t.replace('A', '')
+    t=t.replace('H', '')
     t = int(t)
     t = obj['alpha'][t]
     return "{" + "\n" + "answer: A=" + str(t) + "*B\n}"
-def callzaihuansuan0106_2(bianma, label, A,B,C, aiclient, qwclient):##C->tz
-    t =  callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient)
+def callzaihuansuan0106_2(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    t =  callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient, sfclient)
+    print(t)
     t=t.replace('A', '')
+    t=t.replace('H', '')
     t = int(t)
     t = obj['alpha'][t]/10
     return "{" + "\n" + "answer: A=" + str(t) + "*B\n}"
 
-def callzaihuansuan0106_3(bianma, label, A,B,C, aiclient, qwclient):##C->tz
-    t =  callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient)
+def callzaihuansuan0106_3(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    t =  callzaihuansuan0106_0(bianma, label, A,B,C, aiclient, qwclient, sfclient)
+    print(t)
     t=t.replace('A', '')
+    t=t.replace('H', '')
     t = int(t)
     t = obj['alpha'][t]/100
     return "{" + "\n" + "answer: A=" + str(t) + "*B\n}"

+ 6 - 7
huansuan0108.py

@@ -1,4 +1,6 @@
 import re
+from config import simplemodel
+from template import expression
 with open('gangjiegouhuansuan','r') as f:
     content = f.read()
 import json
@@ -7,7 +9,7 @@ options=[]
 for i in range(len(obj['mc'])):
     options.append('给定选项A'+str(i)+', 内容为'+obj['mc'][i])
 options = ','.join(options)
-def callzaihuansuan0108_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
+def callzaihuansuan0108_1(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
     completion = aiclient.chat.completions.create(
         model="glm-4.5-flash",
         #model="Qwen/Qwen3-14B",
@@ -21,12 +23,9 @@ def callzaihuansuan0108_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,表达式中不含有圆周率π, 请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content

+ 6 - 7
huansuan0110.py

@@ -1,4 +1,6 @@
 import re
+from config import simplemodel
+from template import expression
 with open('gangjiegouhuansuan','r') as f:
     content = f.read()
 import json
@@ -7,7 +9,7 @@ options=[]
 for i in range(len(obj['mc'])):
     options.append('给定选项A'+str(i)+', 内容为'+obj['mc'][i])
 options = ','.join(options)
-def callzaihuansuan0110_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
+def callzaihuansuan0110_1(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
     completion = aiclient.chat.completions.create(
         model="glm-4.5-flash",
         #model="Qwen/Qwen3-14B",
@@ -22,12 +24,9 @@ def callzaihuansuan0110_1(bianma, label, A,B,C, aiclient, qwclient):##C->tz
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,表达式中不含有圆周率π, 请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content

+ 93 - 0
huansuan0111.py

@@ -0,0 +1,93 @@
+import re
+from config import simplemodel
+from template import expression
+with open('gangjiegouhuansuan','r') as f:
+    content = f.read()
+import json
+obj = json.loads(content)
+options=[]
+for i in range(len(obj['mc'])):
+    options.append('给定选项A'+str(i)+', 内容为'+obj['mc'][i])
+options = ','.join(options)
+def callzaihuansuan0111_1(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        #model="Qwen/Qwen3-14B",
+        #model="qwen3-14b",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "在施工上下文中,钢筋网片规格一般描述为Φ4@200,表示直径4毫米,间距200毫米。再比如,Φ6@150,表示直径6毫米,间距150毫米"},
+        {"role": "user", "content": "问题描述:  给定一段工作内容描述,内容为" + C + ",请找出钢筋网片的规格并返回,例如,如果规格为Φ4@200,请返回Φ4@200,如果没有找到,则返回miss"},
+        ],
+        #extra_body={"enable_thinking": False},
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '@' in json_string:
+        parts = json_string.split('@')
+        pa = re.compile(r'\d+')
+        zhijing = pa.search(parts[0][-10:]).group()
+        jianju = pa.search(parts[1][:10]).group()
+        print('zhijing')
+        print(zhijing)
+        print('jianju')
+        print(jianju)
+        zhijing = int(zhijing)
+        jianju = int(jianju)
+        midu = 2 / jianju
+        weight = 0.00617 * zhijing * zhijing * midu
+        weight = 'A=' + str(weight) + '*B'
+        return '''
+        {
+        'answer': ''' + weight + '''
+        }
+        '''
+    else:
+        return '''
+    {
+    'answer': 'A<>B'
+    }
+    '''
+
+    
+def callzaihuansuan0111_2(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:厚度可以用单位米(m)来衡量,可以用单位厘米(cm)来衡量,可以用单位毫米(mm)来衡量;如果没有明确单位,默认指毫米"},
+        {"role": "user", "content": "问题描述:  计量单位可以用名称或者符号表示,常用的符号包括表示米的符号m,表示千米的符号km,表示吨的符号t,表示千克的符号kg,表示平方米的符号m2,表示立方米的符号m3。给定一段工作内容描述,内容为" + C + ",给定其工作量计量单位,内容为" + A + ",记作A,再给定一个工序描述, 内容为" + label + ",它属于前述工作内容的一部分。它的计量单位为" + B + ",记作B。若A表示面积,B表示体积,且工序涉及垫层,则可以从工作内容中识别出垫层的厚度(米),作为换算系数coefficient,最终返回A=coefficient*B。例如,工作内容涉及垫层的厚度是0.1米,则返回A=0.1*B。工作内容涉及垫层的厚度为200mm,则换算为0.2米,返回A=0.2*B。若不符合上述情况,则返回A<>B。例如,A表示长度,B表示面积,则返回A<>B。再例如,A表示重量,B表示体积,则返回A<>B"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return json_string
+def callzaihuansuan0111_3(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:高度、宽度可以用单位米(m)来衡量,可以用单位厘米(cm)来衡量,可以用单位毫米(mm)来衡量;如果没有明确单位,默认指毫米"},
+        {"role": "user", "content": "问题描述:  计量单位可以用名称或者符号表示,常用的符号包括表示米的符号m,表示千米的符号km,表示吨的符号t,表示千克的符号kg,表示平方米的符号m2,表示立方米的符号m3。给定一段工作内容描述,内容为" + C + ",给定其工作量计量单位,内容为" + A + ",记作A,再给定一个工序描述, 内容为" + label + ",它属于前述工作内容的一部分。它的计量单位为" + B + ",记作B。若A表示面积,B表示长度,且工序涉及踢脚线,则可以从工作内容中识别出踢脚线的高度(米),然后根据矩形面积和高度,求出长度,作为换算系数coefficient,最终返回A=coefficient*B。例如,工作内容涉及踢脚线的高度是100毫米(0.1米),A表示面积是1m2(1平方米),则长度为10m,而B表示10m,则返回A=1*B。工作内容涉及踢脚线的高度为200mm,换算为0.2米,A表示面积1m2(1平方米),则可以求得矩形长度为5米,因为B表示长度为10米,所以返回A=0.5*B。若不符合上述情况,则返回A<>B。例如,A表示长度,B表示面积,则返回A<>B。再例如,A表示重量,B表示体积,则返回A<>B"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return json_string

+ 52 - 0
huansuan0112.py

@@ -0,0 +1,52 @@
+import re
+from config import simplemodel
+from template import expression
+with open('gangjiegouhuansuan','r') as f:
+    content = f.read()
+import json
+obj = json.loads(content)
+options=[]
+for i in range(len(obj['mc'])):
+    options.append('给定选项A'+str(i)+', 内容为'+obj['mc'][i])
+options = ','.join(options)
+    
+def callzaihuansuan0112_1(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:长度可以用单位米(m)来衡量,可以用单位厘米(cm)来衡量,可以用单位毫米(mm)来衡量;如果没有明确单位,默认指毫米"},
+        {"role": "user", "content": "问题描述:  计量单位可以用名称或者符号表示,常用的符号包括表示米的符号m,表示千米的符号km,表示吨的符号t,表示千克的符号kg,表示平方米的符号m2,表示立方米的符号m3。给定一段工作内容描述,内容为" + C + ",给定其工作量计量单位,内容为" + A + ",记作A,再给定一个工序描述, 内容为" + label + ",它属于前述工作内容的一部分。它的计量单位为" + B + ",记作B。若A表示面积,B表示房间数量,且工序涉及卫生间隔断,则可以从工作内容中识别出每间隔断的宽度、高度(米),计算面积,取倒数作为换算系数coefficient,最终返回A=coefficient*B。例如,工作内容涉及隔断的高度是900(毫米),宽400(毫米),则可计算得面积0.36(平方米),求倒数得每平方米面积对应2.78间隔断。因为单位B是10间(不是1间,需按比例换算),所以最终返回A=0.278*B。若不符合上述情况,则返回A<>B。例如,A表示长度,B表示面积,则返回A<>B。再例如,A表示重量,B表示体积,则返回A<>B"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return json_string
+def callzaihuansuan0112_2(bianma, label, A,B,C, aiclient, qwclient, sfclient):##C->tz
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:高度、宽度可以用单位米(m)来衡量,可以用单位厘米(cm)来衡量,可以用单位毫米(mm)来衡量;如果没有明确单位,默认指毫米"},
+        {"role": "user", "content": "问题描述:  计量单位可以用名称或者符号表示,常用的符号包括表示米的符号m,表示千米的符号km,表示吨的符号t,表示千克的符号kg,表示平方米的符号m2,表示立方米的符号m3。给定一段工作内容描述,内容为" + C + ",给定其工作量计量单位,内容为" + A + ",记作A,再给定一个工序描述, 内容为" + label + ",它属于前述工作内容的一部分。它的计量单位为" + B + ",记作B。若A表示面积,B表示长度,且工序涉及踢脚线,则可以从工作内容中识别出踢脚线的高度(米),然后根据矩形面积和高度,求出长度,作为换算系数coefficient,最终返回A=coefficient*B。例如,工作内容涉及踢脚线的高度是100毫米(0.1米),A表示面积是1m2(1平方米),则长度为10m,而B表示10m,则返回A=1*B。工作内容涉及踢脚线的高度为200mm,换算为0.2米,A表示面积1m2(1平方米),则可以求得矩形长度为5米,因为B表示长度为10米,所以返回A=0.5*B。若不符合上述情况,则返回A<>B。例如,A表示长度,B表示面积,则返回A<>B。再例如,A表示重量,B表示体积,则返回A<>B"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=expression(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return json_string

Разница между файлами не показана из-за своего большого размера
+ 0 - 0
incremental_rule


+ 1 - 1
jieheceng.py

@@ -27,7 +27,7 @@ def jieheceng(A, aiclient, qwclient):
         #model="ZhipuAI/GLM-4.5",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "给定一段文字: " + A +   "\n 该段文字描述了包括一道或多道工序的工作内容。其中有可能一道或多道工作内容含有“结合层”字样。请你阅读这段文字,判断是否存在“结合层”工序。如果没有,请原封不动的返回给定的文字;如果有,请将含有“结合层”字样的一道或多道工作内容删除,返回剩余的工作内容,剩余的工作内容请原封不动的返回。 例如,给定一段文字:" + example1 +"\n请返回:" + answer1 + "\n 再例如,给定一段文字: " + example2 + "\n请返回: " + answer2},
+            {"role": "user", "content": "给定一段文字: " + A +   "\n 该段文字描述了包括一道或多道工序的工作内容。其中有可能一道或多道工作内容含有“结合层”、“粘接层”、“粘结层”字样。请你阅读这段文字,判断是否存在“结合层”、“粘接层”、“粘结层”工序。如果没有,请原封不动的返回给定的文字;如果有,请将含有“结合层”、“粘接层”、“粘结层”字样的一道或多道工作内容删除,返回剩余的工作内容,剩余的工作内容请原封不动的返回。 例如,给定一段文字:" + example1 +"\n请返回:" + answer1 + "\n 再例如,给定一段文字: " + example2 + "\n请返回: " + answer2},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},

+ 63 - 0
mianceng.py

@@ -0,0 +1,63 @@
+example1="""
+散水 1、种植散水 做法详见图纸设计及图集12J003-6A/A2
+3、60厚C20细石混凝土,随打随磨,撒1:1水泥砂子压实赶光
+4、150厚粒径10-40卵石灌M2.5混合砂浆
+5、素土夯实,向外坡3%-5%
+6、与墙交接处聚乙烯泡沫塑料及密封膏嵌缝
+"""
+answer1="""
+散水 1、种植散水 做法详见图纸设计及图集12J003-6A/A2
+3、面层 60厚C20细石混凝土,随打随磨,撒1:1水泥砂子压实赶光
+4、150厚粒径10-40卵石灌M2.5混合砂浆 
+5、素土夯实,向外坡3%-5%
+6、与墙交接处聚乙烯泡沫塑料及密封膏嵌缝
+"""
+example2='''
+给定的文字如下:
+
+1、用于标高46.25m处平屋面及坡屋面下平屋面 用于标高50.25m处平屋面 做法详见图纸设计及相关图集规范
+2、50厚C30细石混凝土,内配φ4@100双向钢筋网片,面层压光,设4mx4m分格缝(钢筋断开),缝宽15,内填嵌膏缝油
+3、10厚1:3石灰砂浆隔离层
+
+经过分析,这段文字描述的是屋面构造做法,包括混凝土面层和隔离层。其中没有提到垫层相关的施工内容(如砂石层、回填土等起支撑或排水作用的基层材料)。
+
+因此,该段文字中**没有垫层的施工工序**,原封不动返回如下:
+
+1、用于标高46.25m处平屋面及坡屋面下平屋面 用于标高50.25m处平屋面 做法详见图纸设计及相关图集规范
+2、50厚C30细石混凝土,内配φ4@100双向钢筋网片,面层压光,设4mx4m分格缝(钢筋断开),缝宽15,内填嵌膏缝油
+3、10厚1:3石灰砂浆隔离层
+'''
+answer2='''
+1、用于标高46.25m处平屋面及坡屋面下平屋面 用于标高50.25m处平屋面 做法详见图纸设计及相关图集规范
+2、50厚C30细石混凝土,内配φ4@100双向钢筋网片,面层压光,设4mx4m分格缝(钢筋断开),缝宽15,内填嵌膏缝油
+3、10厚1:3石灰砂浆隔离层
+'''
+def mianceng(A, aiclient, qwclient):
+    completion = qwclient.chat.completions.create(
+        model="Qwen/Qwen3-32B",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "散水、坡道往往使用细石混凝土作面层。给定一段文字: " + A +   "\n 该段文字描述了包括一道或多道工序的散水、坡道的施工工作内容。其中有可能包括了面层施工工序。请你阅读这段文字,判断是否存在面层的工作内容。如果没有,请原封不动的返回给定的文字;如果有,请在面层的工序上注明“面层”两个字,其余则原封不动,并返回。 例如,给定一段文字:" + example1 +"\n请返回:" + answer1 + "\n  "},
+        ],
+        #extra_body={"thinking": {"type": "disabled"}},
+        extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "给定一段文字:   请抽取出文字中的返回结果"},
+            {"role": "user", "content": "例如,给定一段文字: " + example2 +   "\n 请返回:\n"+answer2},
+            {"role": "user", "content": "现在,给定一段文字: " + json_string +   "\n 请参照例子,返回抽取出的结果,请直接返回结果"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return '\n'.join([x for x in json_string.split("\n") if '结果' not in x])
+    
+    
+    

+ 69 - 0
mianji.py

@@ -0,0 +1,69 @@
+def adjust(
+       selected,
+       data,
+       aiclient,
+       qwclient,
+       sfclient
+       ):
+    options=[
+        ['第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼地面单块0.4m2以内地砖 干硬性水泥砂浆', '第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼地面单块0.4m2以外地砖 干硬性水泥砂浆'],
+        ['第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼地面单块0.4m2以内地砖 水泥砂浆', '第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼地面单块0.4m2以外地砖 水泥砂浆'],
+        ['第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼地面单块0.4m2以内地砖 干粉型粘结剂','第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼地面单块0.4m2以外地砖 干粉型粘结剂'],
+        ['第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼梯单块0.1m2以内地砖 水泥砂浆', '第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼梯单块0.4m2以内地砖 水泥砂浆', '第十三章  楼地面工程 13.4  块料面层 13.4.4  地砖、橡胶塑料板 楼梯单块0.4m2以外地砖 水泥砂浆'],
+        ['第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.06m2以内墙砖 砂浆粘贴 墙面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以内墙砖 砂浆粘贴 墙面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以上墙砖 砂浆粘贴 墙面'],
+        ['第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.06m2以内墙砖 砂浆粘贴 柱、梁、零星面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以内墙砖 砂浆粘贴 柱、梁、零星面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以上墙砖 砂浆粘贴 柱、梁、零星面'],
+        ['第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.06m2以内墙砖 干粉型粘结剂粘贴 墙面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以内墙砖 干粉型粘结剂粘贴 墙面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以上墙砖 干粉型粘结剂粘贴 墙面'],
+        ['第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.06m2以内墙砖 干粉型粘结剂粘贴 柱、梁、零星面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以内墙砖 干粉型粘结剂粘贴 柱、梁、零星面', '第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以上墙砖 干粉型粘结剂粘贴 柱、梁、零星面']
+    ]
+    hit = []
+    hit_group = []
+    for entry in selected:
+        for group in options:
+            if entry in group:
+                hit = [entry]
+                hit_group = group
+    if len(hit) == 0:
+        return selected
+    xuanxiang=[]
+    choice=[]
+    letters='ABC'
+    for i in range(len(hit_group)):
+        xuanxiang.append("给定一个选项:" + hit_group[i] + ', 记作' + letters[i])
+    completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "长度单位可以是米,是厘米,是毫米。如果未注明,则默认是毫米."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + data['tz'] + '\n其中可能包括了地砖或者墙砖的尺寸。譬如,10厚800X800防滑地砖,就表明地砖尺寸为800毫米乘以800毫米\n根据尺寸可以算出单块砖的面积0.64m2。再例如,10厚600X600地砖,尺寸600毫米,面积0.36m2' + ';'.join(xuanxiang) + "\n请根据工作内容计算出面积,并从上述选项中选择单块面积最恰当的一个选项并返回。请给出分析过程并返回A或者B或者C作为答案.如果无法确定,请返回A"},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string) < 8:
+        if 'A' in json_string:
+            choice=[hit_group[0]]
+        if 'B' in json_string:
+            choice=[hit_group[1]]
+        if 'C' in json_string:
+            choice=[hit_group[2]]
+    else:
+        completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A或者B或者C的选项作为答案,请将该答案输出"},                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+        json_string = completion.choices[0].message.content
+        print(json_string)
+        if 'A' in json_string:
+            choice=[hit_group[0]]
+        if 'B' in json_string:
+            choice=[hit_group[1]]
+        if 'C' in json_string:
+            choice=[hit_group[2]]
+    selected = [x for x in selected if x not in hit]
+    selected = selected + choice
+    return selected

+ 26 - 10
postprocess.py

@@ -8,28 +8,44 @@ from postprocess0108 import postprocess0108
 from postprocess0109 import postprocess0109
 from postprocess0110 import postprocess0110
 from postprocess0111 import postprocess0111
-def postprocess(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
+from postprocess0112 import postprocess0112
+from postprocess0113 import postprocess0113
+from postprocess0114 import postprocess0114
+from postprocess0115 import postprocess0115
+from mianji import adjust
+def postprocess(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates,yqspecial):
+    l = postprocess_(selected,data,aiclient,qwclient,sfclient,label_name,name_dw,candidates,yqspecial)
+    return adjust(l, data, aiclient, qwclient, sfclient)
+def postprocess_(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates,yqspecial):
     if len(selected) == 0:
         return []
     if data['bianma'].startswith("0103"):
-        return postprocess0103(selected, data, aiclient)
+        return postprocess0103(selected, data, aiclient, sfclient)
     elif data['bianma'].startswith("0117"):
-        return postprocess0117(selected, data, aiclient)
+        return postprocess0117(selected, data, aiclient, sfclient)
     elif data['bianma'].startswith("0101"):
-        return postprocess0101(selected, data, aiclient)
+        return postprocess0101(selected, data, aiclient, sfclient)
     elif data['bianma'].startswith("0104"):
-        return postprocess0104(selected, data, aiclient)
+        return postprocess0104(selected, data, aiclient, sfclient)
     elif data['bianma'].startswith("0105"):
-        return postprocess0105(selected, data, aiclient, qwclient, label_name, name_dw)
+        return postprocess0105(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
     elif data['bianma'].startswith("0106"):
-        return postprocess0106(selected, data, aiclient, qwclient, label_name, name_dw)
+        return postprocess0106(selected, data, aiclient, qwclient, sfclient, label_name, name_dw,yqspecial)
     elif data['bianma'].startswith("0108"):
-        return postprocess0108(selected, data, aiclient, qwclient, label_name, name_dw)
+        return postprocess0108(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
     elif data['bianma'].startswith("0109"):
-        return postprocess0109(selected, data, aiclient, qwclient, label_name, name_dw)
+        return postprocess0109(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
     elif data['bianma'].startswith("0110"):
-        return postprocess0110(selected, data, aiclient, qwclient, label_name, name_dw)
+        return postprocess0110(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
     elif data['bianma'].startswith("0111"):
         return postprocess0111(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
+    elif data['bianma'].startswith("0112"):
+        return postprocess0112(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
+    elif data['bianma'].startswith("0113"):
+        return postprocess0113(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
+    elif data['bianma'].startswith("0114"):
+        return postprocess0114(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
+    elif data['bianma'].startswith("0115"):
+        return postprocess0115(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
     else: 
         return selected

+ 17 - 21
postprocess0101.py

@@ -1,7 +1,9 @@
 import json
+from config import simplemodel
+from template import xuanxiang
 def huitianfilter(A, #options
        B, #data
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
@@ -21,12 +23,9 @@ def huitianfilter(A, #options
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
@@ -51,7 +50,7 @@ def huitianfilter(A, #options
     return answer
 def aifilter(A, #options
        B, #data
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
@@ -68,12 +67,9 @@ def aifilter(A, #options
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为答案,请将该最终答案输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
@@ -98,7 +94,7 @@ def aifilter(A, #options
     return answer
 def ai(A, #options
        B, #data
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
@@ -137,8 +133,8 @@ def ai(A, #options
         if 'J' in json_string:
             return A[9]
 
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A或者B或者C的表达式作为答案,请将该最终答案输出.例如,文字提到答案为A,请输出A;文字提到答案是B,请输出B"},                       
@@ -185,13 +181,13 @@ def tihuan(selected):
         return left
     else:
         return selected
-def postprocess0101(selected, data, aiclient):
+def postprocess0101(selected, data, aiclient, sfclient):
     if data['bianma'].startswith("010101001"):##平整场地
         if len(selected) > 1:
-            return tihuan([ai(selected, data, aiclient)])
+            return tihuan([ai(selected, data, aiclient, sfclient)])
         else:
             return tihuan(selected)
     elif data['bianma'].startswith("010103"):##回填
-        return huitianfilter(selected, data, aiclient)
+        return huitianfilter(selected, data, aiclient, sfclient)
     else:
-        return aifilter(selected, data, aiclient)
+        return aifilter(selected, data, aiclient, sfclient)

+ 26 - 26
postprocess0103.py

@@ -1,3 +1,5 @@
+from config import simplemodel
+from template import xuanxiang
 import json
 with open("0103basic_rule", "r") as f:
     content = f.read()
@@ -5,29 +7,26 @@ with open("0103basic_rule", "r") as f:
 rule = json.loads(content)
 def aifilter(A, #options
        B, #data
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i])
 
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 处理要求:如果清单描述的工作内容是管桩清理,则去除给定选项中的管桩填芯的选项; 如果清单描述的工作内容不是管桩清理,则不做处理"},
-            {"role": "user", "content": "问题描述: 给定一段工程量清单描述: " + B['mc'] + " " + B['tz'] +   "," + ",".join(options) + "。请根据处理要求,处理选项,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工程量清单描述: " + B['mc'] + " " + B['tz'] +   "," + ",".join(options) + "。请根据处理要求,处理选项,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终结果以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
@@ -85,10 +84,10 @@ def associate(answer):
 
 def jiezhuang(
        data, #data
-       aiclient):
+       aiclient, sfclient):
 
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 背景知识:已知预应力高强混凝土管桩(PHC)型号定义为PHC-AAA(BB)CC-DDD-E1,E2,E3,E4,其中AAA代表管桩外径,BB代表管桩壁厚,CC表示型号,DDD表示混凝土强度等级,E1/E2/E3/E4表示分段桩长。例如,PHC-500(125)-AB-C80-9,7 表示外径500mm,壁厚125mm,型号AB,混凝土强度C80, 分段桩长分别为9米和7米,总桩长16米,施工时需要将两个分段接桩;再例如,PHC-500(125)-AB-C80-9 表示外径500mm,壁厚125mm,型号AB,混凝土强度C80, 为一整段桩,长9米, 施工时不需要接桩"},
@@ -103,8 +102,8 @@ def jiezhuang(
         return True
     if "是" not in json_string and "否" in json_string:
         return False
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的答案,请将该最终答案输出"},                       
@@ -119,7 +118,7 @@ def jiezhuang(
         return False
 def songzhuang(
        data, #data
-       aiclient):
+       aiclient, sfclient):
 
     completion = aiclient.chat.completions.create(
         model="glm-4.5-flash",
@@ -136,8 +135,8 @@ def songzhuang(
         return True
     if "是" not in json_string and "否" in json_string:
         return False
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的答案,请将该最终答案输出"},                       
@@ -153,7 +152,7 @@ def songzhuang(
 def ai(A, #options
        B, #data
        C, #entry
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
@@ -192,8 +191,8 @@ def ai(A, #options
         if 'J' in answer2:
             return A[9]
 
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + answer2 + ",其中给出了一个类似于A或者B或者C的表达式作为答案,请将该最终答案输出"},                       
@@ -204,6 +203,7 @@ def ai(A, #options
     print(json_string)
     answers = json_string.split("\n")
     answers = [x for x in answers if ':' in x]
+    answers = [x for x in answers if 'A' in x or 'B' in x or 'C' in x or 'D' in x]
     print(answers)
     if len(answers) == 0:
         return C
@@ -228,15 +228,15 @@ def ai(A, #options
         return A[8]
     if 'J' in answer2:
         return A[9]
-def select(options, data, entry, aiclient):
+def select(options, data, entry, aiclient, sfclient):
     if len([x for x in options if '桩长在' in x]) == len(options):
-        return ai(options, data, entry, aiclient)
+        return ai(options, data, entry, aiclient, sfclient)
     else:
         return entry
-def postprocess0103(selected, data, aiclient):
-    if jiezhuang(data, aiclient):
+def postprocess0103(selected, data, aiclient, sfclient):
+    if jiezhuang(data, aiclient, sfclient):
         selected = associate_jiezhuang(selected)
-    if songzhuang(data, aiclient):
+    if songzhuang(data, aiclient, sfclient):
         selected = associate(selected)
     correct=[]
     for entry in selected:
@@ -246,7 +246,7 @@ def postprocess0103(selected, data, aiclient):
             if entry in l:
                 options = l
         if len(options) > 0:
-            correct.append(select(options, data, entry, aiclient))
+            correct.append(select(options, data, entry, aiclient, sfclient))
         else:
             correct.append(entry)
-    return aifilter(correct, data, aiclient)
+    return aifilter(correct, data, aiclient, sfclient)

+ 10 - 11
postprocess0104.py

@@ -1,7 +1,9 @@
 import json
+from config import simplemodel
+from template import xuanxiang
 def aifilter(A, #options
        B, #data
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
@@ -11,19 +13,16 @@ def aifilter(A, #options
         model="glm-4.5-flash",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 处理要求:如果工作内容中没有明确提出砌贴砖,则去除选项中的砌贴砖选项"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +   "," + ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": " 处理要求:如果工作内容中没有明确提出砌贴砖,则去掉含有“砌贴砖”字样的选项"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +   "," + ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
@@ -46,5 +45,5 @@ def aifilter(A, #options
     if 'H' in json_string:
         answer.append(A[7])
     return answer
-def postprocess0104(selected, data, aiclient):
-    return aifilter(selected, data, aiclient)
+def postprocess0104(selected, data, aiclient, sfclient):
+    return aifilter(selected, data, aiclient, sfclient)

+ 257 - 27
postprocess0105.py

@@ -1,13 +1,20 @@
 import json
+from config import simplemodel
+from template import xuanxiang
 with open('nantong_rule', 'r') as f:
     content = f.read()
 import json
 obj=json.loads(content)
 from fallback import fallback
-def aifilter(A, #options
+qiangdu={"role": "user", "content": " 特殊处理要求:不需考虑工作内容中的混凝土强度等级与选项中的混凝土强度等级的差异"}
+xiang={"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"}
+shanchu={"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合(匹配),不得擅自根据符合与否去除选项,只需要根据特殊处理要求做出处理"}
+ceng={"role": "user", "content": " 重要提示:混凝土找平层与混凝土面层是不同的概念,不得混淆。混凝土整体面层不属于混凝土找平层"}
+def aifilter4(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
@@ -15,44 +22,254 @@ def aifilter(A, #options
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
     completion = aiclient.chat.completions.create(
-        model="glm-z1-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 特殊处理要求:如果选项中同时出现现浇板式雨棚(雨蓬)选项和现浇水平板(平板)选项,则二者只能选一个,且优先选择出现更早的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中同时出现多个钢丝网选项,则只能选一个,且优先选择出现更早的选项"},
-            {"role": "user", "content": " 特殊处理要求:钢盖板,钢篦子一般指不锈钢材质。如果选项中同时出现不锈钢盖板选项和铸铁盖板选项,则只能选一种,且优先选择与工作内容描述更接近的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中同时出现混凝土散水选项和混凝土(砼)整体面层选项,则二者只能选一个,且优先选择出现更早的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有模板工程选项,但是工作内容中没有明确提出包含模板工程,则去除选项中的模板工程选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有锯缝选项,但是工作内容中没有明确提出需要锯缝(道路伸缩缝),则去除选项中的锯缝选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有檩条选项,但是工作内容中没有涉及檩条,则去除选项中的檩条选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有加浆选项,但是工作内容中没有明确提出需要加浆,则去除选项中的加浆选项"},
             {"role": "user", "content": " 特殊处理要求:如果选项中有混凝土垫层选项,但是工作内容中明确描述垫层做法是碎石垫层,则去除选项中的混凝土垫层选项"},
             {"role": "user", "content": " 特殊处理要求:如果选项中有混凝土垫层选项,但是工作内容中不涉及垫层,则去除选项中的混凝土垫层选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有细石混凝土(细石砼)找平层选项,但是工作内容没有明确表述细石混凝土找平层,则去除选项中的细石混凝土找平层选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有细石混凝土(细石砼)找平层选项(细石混凝土整体面层不属于找平层),但是工作内容没有明确表述细石混凝土找平层,则去除选项中的细石混凝土找平层选项"},
             {"role": "user", "content": " 特殊处理要求:抱框柱是柱的一种,与门框不相同。如果选项中有门框选项,但是工作内容中没有明确提出门框,则去除选项中的门框选项"},
-            {"role": "user", "content": " 特殊处理要求:不需考虑工作内容中的混凝土强度等级与选项中的混凝土强度等级的差异"},
-            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
-            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": " 重要提示:混凝土找平层与混凝土面层是不同的概念,不得混淆"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,写出推理过程,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            qiangdu,xiang,shanchu,ceng,
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,写出推理过程,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string:
+        answer.append(A[0])
+    if 'B' in json_string:
+        answer.append(A[1])
+    if 'C' in json_string:
+        answer.append(A[2])
+    if 'D' in json_string:
+        answer.append(A[3])
+    if 'E' in json_string:
+        answer.append(A[4])
+    if 'F' in json_string:
+        answer.append(A[5])
+    if 'G' in json_string:
+        answer.append(A[6])
+    if 'H' in json_string:
+        answer.append(A[7])
+    if 'I' in json_string:
+        answer.append(A[8])
+    if 'J' in json_string:
+        answer.append(A[9])
+    return answer
+def aifilter2(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+             sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-14B",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有檩条选项,但是工作内容中没有涉及檩条,则去除选项中的檩条选项"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容中没有明确提及素水泥浆(提到水泥浆不等于提到素水泥浆),则去除选项中的素水泥浆选项"},
+             qiangdu,xiang,shanchu,ceng,
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,写出推理过程,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string:
+        answer.append(A[0])
+    if 'B' in json_string:
+        answer.append(A[1])
+    if 'C' in json_string:
+        answer.append(A[2])
+    if 'D' in json_string:
+        answer.append(A[3])
+    if 'E' in json_string:
+        answer.append(A[4])
+    if 'F' in json_string:
+        answer.append(A[5])
+    if 'G' in json_string:
+        answer.append(A[6])
+    if 'H' in json_string:
+        answer.append(A[7])
+    if 'I' in json_string:
+        answer.append(A[8])
+    if 'J' in json_string:
+        answer.append(A[9])
+    return answer
+def aifilter3(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+             sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-14B",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有模板工程选项,但是工作内容中没有明确提出包含模板工程,则去除选项中的模板工程选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有锯缝选项,但是工作内容中没有明确提出需要锯缝(道路伸缩缝),则去除选项中的锯缝选项"},
+            qiangdu,xiang,shanchu,ceng,
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,写出推理过程,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string:
+        answer.append(A[0])
+    if 'B' in json_string:
+        answer.append(A[1])
+    if 'C' in json_string:
+        answer.append(A[2])
+    if 'D' in json_string:
+        answer.append(A[3])
+    if 'E' in json_string:
+        answer.append(A[4])
+    if 'F' in json_string:
+        answer.append(A[5])
+    if 'G' in json_string:
+        answer.append(A[6])
+    if 'H' in json_string:
+        answer.append(A[7])
+    if 'I' in json_string:
+        answer.append(A[8])
+    if 'J' in json_string:
+        answer.append(A[9])
+    return answer
+def aifilter1_2(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+             sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-14B",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求:钢盖板,钢篦子一般指不锈钢材质。如果选项中同时出现不锈钢盖板选项和铸铁盖板选项,则二者只能选一种(并删除另外一个),且优先选择与工作内容描述更接近的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中同时出现混凝土散水选项和混凝土(砼)整体面层选项(加浆抹光选项虽然有整体面层字样,但是不属于整体面层选项),则二者只能选一个(并删除另一个),且优先选择出现更早的选项"},
+            qiangdu,xiang,shanchu,ceng,
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,写出推理过程,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        extra_body={"thinking": {"type": "enabled"}},
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
+    answer=[]
+    if 'A' in json_string:
+        answer.append(A[0])
+    if 'B' in json_string:
+        answer.append(A[1])
+    if 'C' in json_string:
+        answer.append(A[2])
+    if 'D' in json_string:
+        answer.append(A[3])
+    if 'E' in json_string:
+        answer.append(A[4])
+    if 'F' in json_string:
+        answer.append(A[5])
+    if 'G' in json_string:
+        answer.append(A[6])
+    if 'H' in json_string:
+        answer.append(A[7])
+    if 'I' in json_string:
+        answer.append(A[8])
+    if 'J' in json_string:
+        answer.append(A[9])
+    return answer
+def aifilter1_1(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+             sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-14B",
         messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求:如果选项中同时出现现浇板式雨棚(雨蓬)选项和现浇水平板(平板)选项,则二者只能选一个(并删除另一个),且优先选择出现更早的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中同时出现多个钢丝网选项,则只能选一个(并删除另一个),且优先选择出现更早的选项"},
+            qiangdu,xiang,shanchu,ceng,
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,写出推理过程,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
     answer=[]
     if 'A' in json_string:
         answer.append(A[0])
@@ -75,18 +292,31 @@ def aifilter(A, #options
     if 'J' in json_string:
         answer.append(A[9])
     return answer
-def postprocess0105(selected, data, aiclient, qwclient, label_name, name_dw):
+def postprocess0105(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
     name=[label_name[x] for x in selected]
     dw=[name_dw[x] for x in name]
     if len(selected) == 1:
         t1 = selected
     else:   
-        t1 = aifilter(selected, data, aiclient, qwclient, dw)
+        t1 = aifilter1_1(selected, data, aiclient, qwclient, sfclient, dw)
+        t1 = aifilter1_2(t1, data, aiclient, qwclient, sfclient, dw)
+        t1 = aifilter3(t1, data, aiclient, qwclient, sfclient, dw)
+        t1 = aifilter2(t1, data, aiclient, qwclient, sfclient, dw)
+        t1 = aifilter4(t1, data, aiclient, qwclient, sfclient, dw)
     #t2 = fallback(t1, data, aiclient, qwclient)
     result = []
-    for entry in t1:
-        if entry in obj['7.8']:##钢盖板
-            result.append(obj['nantong7.8'][0])
-        else:
-            result.append(entry)
+    if '套' in data['dw']:
+        for entry in t1:
+            if entry in obj['7.8']:##钢盖板
+                result.append('第十二章  厂区道路及排水工程 12.5  排水系统中钢筋混凝土井、池、其它 铸铁盖板安装')
+            elif '成品不锈钢盖板安装' in entry:
+                result.append('第十二章  厂区道路及排水工程 12.5  排水系统中钢筋混凝土井、池、其它 铸铁盖板安装')
+            else:
+                result.append(entry)
+    else:    
+        for entry in t1:
+            if entry in obj['7.8']:##钢盖板
+                result.append(obj['nantong7.8'][0])
+            else:
+                result.append(entry)
     return result

+ 217 - 42
postprocess0106.py

@@ -1,3 +1,6 @@
+from template import xuanxiang
+from config import simplemodel
+import time
 tuliaos=[
 '第十七章  油漆、涂料、裱糊工程 17.1  油漆、涂料 17.1.2  金属面油漆 17.1.2.3  防火涂料 金属面防火涂料 薄型 0.5小时',
 '第十七章  油漆、涂料、裱糊工程 17.1  油漆、涂料 17.1.2  金属面油漆 17.1.2.3  防火涂料 金属面防火涂料 薄型 1小时',
@@ -21,6 +24,7 @@ def fanghuotuliao(A, #options
        B, #data
        aiclient,
              qwclient,
+            sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
@@ -28,7 +32,7 @@ def fanghuotuliao(A, #options
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
@@ -44,8 +48,8 @@ def fanghuotuliao(A, #options
             return True
         else:
             return False
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的判断作为结果,请将该最终结果输出"},                       
@@ -63,6 +67,7 @@ def fanghuo(A, #options
        B, #data
        aiclient,
              qwclient,
+            sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
@@ -70,7 +75,7 @@ def fanghuo(A, #options
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
@@ -86,8 +91,8 @@ def fanghuo(A, #options
             return True
         else:
             return False
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的判断作为结果,请将该最终结果输出"},                       
@@ -105,10 +110,11 @@ def tuliaofilter(A, #options
        B, #data
        aiclient,
              qwclient,
+            sfclient,
              dw):
 
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
@@ -134,8 +140,8 @@ def tuliaofilter(A, #options
             return tuliaos[5]
         if 'G' in json_string:
             return tuliaos[6]
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A或者B的字母作为结果,请将该最终结果输出"},                       
@@ -163,37 +169,92 @@ def aifilter3(A, #options
        B, #data
        aiclient,
              qwclient,
+              sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
-
+    time.sleep(5)
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“零星钢构件制作”字样的选项,并且同时选项中有含有“铁件制作”字样的选项,则二者只能选一,且优先选择出现靠前的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“零星钢构件制作”字样的选项,并且同时选项中有含有“铁件制作”字样的选项,则二者只能选一(并删除另一个),且优先选择出现靠前的选项"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
+def aifilter1(A, #options
+       B, #data
+       aiclient,
+             qwclient, 
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-14B",
         messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求一:如果选项中有晒衣架制作安装的选项,但是工作内容中不涉及晒衣架,则去除选项中的晒衣架制作安装的选项"},
+            {"role": "user", "content": " 特殊处理要求二:如果选项中有龙骨钢骨架制作安装的选项,但是工作内容中不涉及龙骨钢骨架,则去除选项中的龙骨钢骨架的选项"},
+            {"role": "user", "content": " 特殊处理要求三:保留选项中的其他选项"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
     answer=[]
     if 'A' in json_string:
         answer.append(A[0])
@@ -216,42 +277,152 @@ def aifilter3(A, #options
     if 'J' in json_string:
         answer.append(A[9])
     return answer
-def aifilter1(A, #options
+def aifilter5(A, #options
        B, #data
        aiclient,
              qwclient,
+              sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
-
+    time.sleep(5)
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有晒衣架制作安装的选项,但是工作内容中不涉及晒衣架,则去除选项中的晒衣架制作安装的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有龙骨钢骨架制作安装的选项,但是工作内容中不涉及龙骨钢骨架,则去除选项中的龙骨钢骨架的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“铁件安装”字样的选项,并且选项中没有其他具体的钢构件安装选项(安装选项,不是制作选项),则不做任何处理"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“铁件安装”字样的选项,并且选项中有其他具体的钢构件安装选项(例如钢骨架安装),则去除选项中的含有“铁件安装”字样的选项"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
+    if len(json_string) < 4:
+        
+        answer=[]
+        if 'A' in json_string: 
+            answer.append(A[0])
+        if 'B' in json_string: 
+            answer.append(A[1])
+        if 'C' in json_string: 
+            answer.append(A[2])
+        if 'D' in json_string: 
+            answer.append(A[3])
+        if 'E' in json_string: 
+            answer.append(A[4])
+        if 'F' in json_string: 
+            answer.append(A[5])
+        if 'G' in json_string: 
+            answer.append(A[6])
+        if 'H' in json_string: 
+            answer.append(A[7])
+        if 'I' in json_string: 
+            answer.append(A[8])
+        if 'J' in json_string: 
+            answer.append(A[9])
+       
+        
+
+
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string:
+        answer.append(A[0])
+    if 'B' in json_string:
+        answer.append(A[1])
+    if 'C' in json_string:
+        answer.append(A[2])
+    if 'D' in json_string:
+        answer.append(A[3])
+    if 'E' in json_string:
+        answer.append(A[4])
+    if 'F' in json_string:
+        answer.append(A[5])
+    if 'G' in json_string:
+        answer.append(A[6])
+    if 'H' in json_string:
+        answer.append(A[7])
+    if 'I' in json_string:
+        answer.append(A[8])
+    if 'J' in json_string:
+        answer.append(A[9])
+    return answer
+def aifilter4(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+    time.sleep(5)
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-14B",
         messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“铁件制作”字样的选项,并且选项中没有其他具体的钢构件制作选项(制作选项,不是安装选项),则不做任何处理"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“铁件制作”字样的选项,并且选项中有其他具体的钢构件选项(例如CZ轻钢檩条),则去除选项中的含有“铁件制作”字样的选项"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
+    if len(json_string) < 4:
+        
+        answer=[]
+        if 'A' in json_string: 
+            answer.append(A[0])
+        if 'B' in json_string: 
+            answer.append(A[1])
+        if 'C' in json_string: 
+            answer.append(A[2])
+        if 'D' in json_string: 
+            answer.append(A[3])
+        if 'E' in json_string: 
+            answer.append(A[4])
+        if 'F' in json_string: 
+            answer.append(A[5])
+        if 'G' in json_string: 
+            answer.append(A[6])
+        if 'H' in json_string: 
+            answer.append(A[7])
+        if 'I' in json_string: 
+            answer.append(A[8])
+        if 'J' in json_string: 
+            answer.append(A[9])
+       
+        
+
+
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
     answer=[]
     if 'A' in json_string:
         answer.append(A[0])
@@ -278,21 +449,22 @@ def aifilter2(A, #options
        B, #data
        aiclient,
              qwclient,
+              sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
-
+    time.sleep(5)
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“零星钢构件制作”字样的选项,并且选项中没有其他具体的钢构件选项,则不做任何处理"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有含有“零星钢构件制作”字样的选项,并且选项中没有其他具体的钢构件制作选项(制作选项,不是安装选项),则不做任何处理"},
             {"role": "user", "content": " 特殊处理要求:如果选项中有含有“零星钢构件制作”字样的选项,并且选项中有其他具体的钢构件选项(例如CZ轻钢檩条),则去除选项中的含有“零星钢构件制作”字样的选项"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
@@ -327,12 +499,9 @@ def aifilter2(A, #options
 
 
         return answer
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
@@ -360,17 +529,23 @@ def aifilter2(A, #options
     if 'J' in json_string:
         answer.append(A[9])
     return answer
-def postprocess0106_(selected, data, aiclient, qwclient, label_name, name_dw):
+def postprocess0106_(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
     if len(selected) == 1:
         return selected
-    return aifilter3(aifilter2(aifilter1(selected, data, aiclient, qwclient, name_dw), data, aiclient, qwclient, name_dw), data, aiclient, qwclient, name_dw)
-def postprocess0106(selected, data, aiclient, qwclient, label_name, name_dw):
-    selected =  postprocess0106_(selected, data, aiclient, qwclient, label_name, name_dw)
-    hit = fanghuo(selected, data, aiclient, qwclient, name_dw)
+    return aifilter5(aifilter4(aifilter3(aifilter2(aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw), data, aiclient, qwclient, sfclient, name_dw), data, aiclient, qwclient, sfclient, name_dw),data,aiclient,qwclient,sfclient,name_dw),data,aiclient,qwclient,sfclient,name_dw)
+def postprocess0106(selected, data, aiclient, qwclient, sfclient, label_name, name_dw,yqspecial):
+    selected = [x for x in selected if '金属面油漆' not in x]
+    selected = selected + yqspecial
+    if len([x for x in selected if '氟碳漆' in x]) > 0:
+        selected = [x for x in selected if '红丹防锈漆' not in x and '调和漆' not in x and '银粉漆' not in x]
+    if len([x for x in selected if '环氧富锌漆' in x]) > 0:
+        selected = [x for x in selected if '红丹防锈漆' not in x ]
+    selected =  postprocess0106_(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
+    hit = fanghuo(selected, data, aiclient, qwclient, sfclient, name_dw)
     if hit:
-        tuliao = fanghuotuliao(selected, data, aiclient, qwclient, name_dw)
+        tuliao = fanghuotuliao(selected, data, aiclient, qwclient, sfclient, name_dw)
         if not tuliao:
-            selected.append(tuliaofilter(selected, data, aiclient, qwclient, name_dw))
+            selected.append(tuliaofilter(selected, data, aiclient, qwclient, sfclient, name_dw))
         return selected
     else:
         return selected

+ 18 - 20
postprocess0108.py

@@ -1,3 +1,5 @@
+from template import xuanxiang
+from config import simplemodel
 tuliaos=[
 '第十七章  油漆、涂料、裱糊工程 17.1  油漆、涂料 17.1.2  金属面油漆 17.1.2.3  防火涂料 金属面防火涂料 薄型 0.5小时',
 '第十七章  油漆、涂料、裱糊工程 17.1  油漆、涂料 17.1.2  金属面油漆 17.1.2.3  防火涂料 金属面防火涂料 薄型 1小时',
@@ -21,6 +23,7 @@ def aifilter1(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
@@ -28,7 +31,7 @@ def aifilter1(A, #options
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
     completion = aiclient.chat.completions.create(
-        model="glm-z1-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
@@ -36,19 +39,16 @@ def aifilter1(A, #options
             {"role": "user", "content": " 特殊处理要求:去掉所有含有“金属面油漆”字样的选项"},
             {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        extra_body={"thinking": {"type": "enabled"}},
+        extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
@@ -80,6 +80,7 @@ def aifilter2(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
@@ -87,7 +88,7 @@ def aifilter2(A, #options
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
@@ -95,7 +96,7 @@ def aifilter2(A, #options
            {"role": "user", "content": " 特殊处理要求:防火门与防火卷帘门是完全不同的产品。如果工作内容的描述没有明确提及防火卷帘门,则去掉防火卷帘门的选项"},
             {"role": "user", "content": " 特殊处理要求:防火门与铝合金门是完全不同的产品。如果工作内容的描述是防火门,则去掉铝合金门的选项"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
@@ -130,12 +131,9 @@ def aifilter2(A, #options
 
 
         return answer
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
@@ -163,12 +161,12 @@ def aifilter2(A, #options
     if 'J' in json_string:
         answer.append(A[9])
     return answer
-def postprocess0108(selected, data, aiclient, qwclient, label_name, name_dw):
-    prime = aifilter1(selected, data, aiclient, qwclient, name_dw)
+def postprocess0108(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
+    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
     if len(prime) == 0:
         return []
     
-    prime = aifilter2(prime, data, aiclient, qwclient, name_dw)
+    prime = aifilter2(prime, data, aiclient, qwclient, sfclient, name_dw)
     if len(prime) == 0:
         return []
     return selected

+ 234 - 48
postprocess0109.py

@@ -1,4 +1,6 @@
 import json
+from config import simplemodel
+from template import xuanxiang
 with open('zhaoping_rule', 'r') as f:
     content = f.read()
 import json
@@ -9,10 +11,75 @@ import json
 name_label=json.loads(content)
 baohuceng = ['10-74', '10-75', '10-77', '10-78', '10-80', '10-81', '10-83', '10-84', '10-86', '10-87', '10-90']
 from fallback import fallback
+def shuangceng(data,aiclient,sfclient):
+    
+    completion = aiclient.chat.completions.create(
+        model='glm-4.5-air',
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 在屋面卷材施工中,卷材可做一层(一道),可做两层(两道)。给你一段文字如下, " + data['tz'] + ",其中提及卷材施工,请问其中明确提及做两层(两道)吗?请回答是或者否"},                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string) < 4:
+        if '否' in json_string:
+            return False
+        return True
+    
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是否施工两层卷材的判断,请将该判断输出,请输出是或者否"},                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '否' in json_string:
+        return False
+    return True
+def zhaopingceng(data,aiclient,sfclient):
+    
+    completion = aiclient.chat.completions.create(
+        model='glm-4.5-air',
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 在屋面防水施工中,细石混凝土可能做找平(找坡)层,也可能做面层(保护层)。给你一段文字如下, " + data['tz'] + ",其中提及细石混凝土,请问其中提及细石混凝土是做找平(找坡)层吗?请回答是或者否"},                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string) < 4:
+        if '否' in json_string:
+            return False
+        return True
+    
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是否为细石混凝土找平层的判断,请将该判断输出,请输出是或者否"},                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '否' in json_string:
+        return False
+    return True
 def aifilter3(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJK"
@@ -32,8 +99,8 @@ def aifilter3(A, #options
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
             {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A或者B的选项作为结果,请将该最终结果输出"},                       
@@ -71,6 +138,7 @@ def aifilter4(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
@@ -78,27 +146,24 @@ def aifilter4(A, #options
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
     completion = aiclient.chat.completions.create(
-        model="glm-z1-air",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-14B",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 背景知识:石油沥青,沥青马蹄脂,渗透结晶防水材料等都是防水涂料。"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中有多个防水涂料的选项,则只能选择一个防水涂料选项,且优先选择渗透结晶防水材料;与防水涂料无关的选项全部保留"},
+            {"role": "user", "content": " 背景知识:石油沥青,沥青马蹄脂,渗透结晶防水材料等都是防水涂料。砂浆隔离层、混凝土防水层不是防水涂料"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中有多个防水涂料的选项(砂浆隔离层、混凝土防水层不是防水涂料),则只能选择一个防水涂料选项,且优先选择渗透结晶防水材料;与防水涂料无关的选项全部保留"},
             {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        extra_body={"thinking": {"type": "enabled"}},
+        extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
@@ -126,49 +191,48 @@ def aifilter4(A, #options
     if 'J' in json_string:
         answer.append(A[9])
     return answer
-def aifilter1(A, #options
+def aifilter1_1(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
-    completion = qwclient.chat.completions.create(
-        #model="glm-z1-flash",
-        model="Qwen/Qwen3-8B",
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 特殊处理要求:去掉所有含有“干铺卷材”字样的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到玻纤网格布,则去掉所有含有“玻纤网格布”字样的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述的是屋面刚性层,则去掉所有含有卷材字样的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述没有明确的“加浆抹光”字样,则去掉所有含有“加浆抹光”字样的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述没有单独的一道“素水泥浆”工序,则去掉所有含有“素水泥浆”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求一:去掉所有含有“干铺卷材”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求二:如果工作内容描述中没有明确提到玻纤网格布,则去掉所有含有“玻纤网格布”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求三:如果工作内容描述的是屋面刚性层,则去掉所有精确含有‘卷材’字样的选项"},
             {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        #extra_body={"thinking": {"type": "enabled"}},
-        extra_body={"enable_thinking": True},
-        stream=True
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
     )
-    done_thinking = False
-    json_string=""
-    thinking_json_string=""
-    for chunk in completion:
-        thinking_chunk = chunk.choices[0].delta.reasoning_content
-        answer_chunk = chunk.choices[0].delta.content
-        if thinking_chunk != '':
-            thinking_json_string = thinking_json_string +  thinking_chunk
-        elif answer_chunk != '':
-            if not done_thinking:
-                done_thinking = True
-            json_string = json_string + answer_chunk
-    #json_string = completion.choices[0].message.content
-    print(thinking_json_string)
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(thinking_json_string)
     print(json_string)
     if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
         answer=[]
@@ -193,13 +257,106 @@ def aifilter1(A, #options
         if 'J' in json_string and len(A) > 9:
             answer.append(A[9])
         return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string:
+        answer.append(A[0])
+    if 'B' in json_string:
+        answer.append(A[1])
+    if 'C' in json_string:
+        answer.append(A[2])
+    if 'D' in json_string:
+        answer.append(A[3])
+    if 'E' in json_string:
+        answer.append(A[4])
+    if 'F' in json_string:
+        answer.append(A[5])
+    if 'G' in json_string:
+        answer.append(A[6])
+    if 'H' in json_string:
+        answer.append(A[7])
+    if 'I' in json_string:
+        answer.append(A[8])
+    if 'J' in json_string:
+        answer.append(A[9])
+    return answer
+def aifilter1_2(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+             sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
     completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
         messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求一:如果工作内容描述没有明确的“加浆抹光”字样,则去掉所有含有“加浆抹光”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求二:如果工作内容描述没有单独的一道“素水泥浆”工序,则去掉所有含有“素水泥浆”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求三:如果选项中同时存在保温砂浆选项跟水泥砂浆选项,则去掉水泥砂浆的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(thinking_json_string)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
@@ -230,6 +387,7 @@ def aifilter2(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     hit_wumian = False
     for entry in A:
@@ -247,20 +405,48 @@ def aifilter2(A, #options
     return A
 
 
-def postprocess0109(selected, data, aiclient, qwclient, label_name, name_dw):
-    prime = aifilter1(selected, data, aiclient, qwclient, name_dw)
+def postprocess0109(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
+    juancai = [x for x in selected if 'SBS改性沥青防水卷材' in x or 'APP改性沥青防水卷材' in x]
+    no_juancai = [x for x in selected if x not in juancai]
+    group_juancai = [
+       [name_label['10-30'],name_label['10-31']],
+       [name_label['10-32'],name_label['10-33']],
+       [name_label['10-34'],name_label['10-35']],
+       [name_label['10-36'],name_label['10-37']],
+       [name_label['10-38'],name_label['10-39']],
+       [name_label['10-40'],name_label['10-41']],
+       [name_label['10-42'],name_label['10-43']],
+       [name_label['10-44'],name_label['10-45']],
+    ]
+    if len(juancai) > 0:
+        hit_group=[]
+        for entry in group_juancai:
+            if juancai[0] in entry:
+                hit_group=entry
+        shuang = shuangceng(data,aiclient,sfclient)
+        if shuang:
+            selected = no_juancai + [hit_group[1]]
+        else:
+            selected = no_juancai + [hit_group[0]]
+            
+    if len([x for x in selected if '屋面找平层' in x and '细石混凝土' in x]) > 0:
+        zhaoping = zhaopingceng(data,aiclient,sfclient)
+        if not zhaoping:
+            selected = [x for x in selected if not ('屋面找平层' in x and '细石混凝土' in x)]
+    prime = aifilter1_1(selected, data, aiclient, qwclient, sfclient, name_dw)
+    prime = aifilter1_2(prime, data, aiclient, qwclient, sfclient, name_dw)
     if data['bianma'].startswith("010902") and '高聚物' in data['tz'] and '改性沥青防水涂料' in data['tz']:##屋面防水
         if '第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.1  涂刷油类 水泥基渗透结晶 防水材料 二~三遍(厚2mm)' not in prime:
             prime.append('第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.1  涂刷油类 水泥基渗透结晶 防水材料 二~三遍(厚2mm)') ##需要换
     if data['bianma'].startswith("010902") and '非固化' in data['tz'] and '沥青防水涂料' in data['tz']:##屋面防水
         if '第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.1  涂刷油类 水泥基渗透结晶 防水材料 二~三遍(厚2mm)' not in prime:
             prime.append('第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.1  涂刷油类 水泥基渗透结晶 防水材料 二~三遍(厚2mm)') ##需要换
-    prime = aifilter2(prime, data, aiclient, qwclient, name_dw)##找平层去重
-    prime = aifilter4(prime, data, aiclient, qwclient, name_dw)##沥青去重
+    prime = aifilter2(prime, data, aiclient, qwclient, sfclient, name_dw)##找平层去重
+    prime = aifilter4(prime, data, aiclient, qwclient, sfclient, name_dw)##沥青去重
     if data['bianma'].startswith("010902") and '保护层' in data['tz']:##屋面防水保护层
         l = len([x for x in prime if '刚性防水屋面' in x])
         if l==0:
-            answer = aifilter3(prime, data, aiclient, qwclient, name_dw)
+            answer = aifilter3(prime, data, aiclient, qwclient, sfclient, name_dw)
             prime.append(answer[0])
     if '南通补充定额 南通补充定额2016 第十章 屋面及防水工程 干铺法施工水泥彩瓦屋面(砼屋面板上钉钢挂瓦条、顺水条)' in prime:
         prime.append('南通补充定额 南通补充定额2016 第十章 屋面及防水工程 干铺法施工水泥彩瓦屋面(铺瓦)')

+ 83 - 79
postprocess0110.py

@@ -1,4 +1,6 @@
 import json
+from config import simplemodel 
+from template import xuanxiang
 with open('zhaoping_rule', 'r') as f:
     content = f.read()
 import json
@@ -13,49 +15,50 @@ def aifilter4(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
-    completion = qwclient.chat.completions.create(
-        #model="glm-z1-flash",
-        model="Qwen/Qwen3-8B",
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        #model="Qwen/Qwen3-8B",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 背景知识:土建施工中,保温做法有很多种,包括保温砖,保温瓦,保温砂浆,保温板等。保温板有不同材料,比如聚苯乙烯泡沫板,聚苯乙烯挤塑板,聚氨酯保温板等"},
+            {"role": "user", "content": " 背景知识:土建施工中,保温做法有很多种,包括保温砖,保温瓦,保温砂浆,保温板等。保温板有不同材料,比如聚苯乙烯泡沫板,聚苯乙烯挤塑板,聚氨酯保温板,玻璃棉板,矿棉板等"},
             {"role": "user", "content": "问题描述:" + ",".join(options)  + "。请问选项中是否有保温板的选项?请回答是或者否"},
         ],
-        #extra_body={"thinking": {"type": "enabled"}},
-        extra_body={"enable_thinking": True},
-        stream=True
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
     )
-    done_thinking = False
-    json_string=""
-    thinking_json_string=""
-    for chunk in completion:
-        thinking_chunk = chunk.choices[0].delta.reasoning_content
-        answer_chunk = chunk.choices[0].delta.content
-        if thinking_chunk != '':
-            thinking_json_string = thinking_json_string +  thinking_chunk
-        elif answer_chunk != '':
-            if not done_thinking:
-                done_thinking = True
-            json_string = json_string + answer_chunk
-    #json_string = completion.choices[0].message.content
-    print(thinking_json_string)
+    ##done_thinking = False
+    ##json_string=""
+    ##thinking_json_string=""
+    ##for chunk in completion:
+    ##    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    ##    answer_chunk = chunk.choices[0].delta.content
+    ##    if thinking_chunk != '':
+    ##        thinking_json_string = thinking_json_string +  thinking_chunk
+    ##    elif answer_chunk != '':
+    ##        if not done_thinking:
+    ##            done_thinking = True
+    ##        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(thinking_json_string)
     print(json_string)
     if len(json_string) < 4:
         if '否' in json_string:
             return False
         return True
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的判断,请将该判断输出"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的判断,请将该中文判断输出"},                       
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
@@ -70,49 +73,50 @@ def aifilter3(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
-    completion = qwclient.chat.completions.create(
-        #model="glm-z1-flash",
-        model="Qwen/Qwen3-8B",
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        #model="Qwen/Qwen3-8B",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 背景知识:土建施工中,保温做法有很多种,包括保温砖,保温瓦,保温砂浆,保温板等。保温板有不同材料,比如聚苯乙烯泡沫板,聚苯乙烯挤塑板,聚氨酯保温板, 岩棉保温板等"},
             {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问工作内容的描述中有涉及保温板吗?请回答是或者否"},
         ],
-        #extra_body={"thinking": {"type": "enabled"}},
-        extra_body={"enable_thinking": True},
-        stream=True
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
     )
-    done_thinking = False
-    json_string=""
-    thinking_json_string=""
-    for chunk in completion:
-        thinking_chunk = chunk.choices[0].delta.reasoning_content
-        answer_chunk = chunk.choices[0].delta.content
-        if thinking_chunk != '':
-            thinking_json_string = thinking_json_string +  thinking_chunk
-        elif answer_chunk != '':
-            if not done_thinking:
-                done_thinking = True
-            json_string = json_string + answer_chunk
-    #json_string = completion.choices[0].message.content
-    print(thinking_json_string)
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(thinking_json_string)
     print(json_string)
     if len(json_string) < 4:
         if '否' in json_string:
             return False
         return True
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的判断,请将该判断输出"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个是或者否的判断,请将该中文判断输出"},                       
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
@@ -127,41 +131,42 @@ def aifilter1(A, #options
        B, #data
        aiclient,
              qwclient,
+             sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
-    completion = qwclient.chat.completions.create(
-        #model="glm-z1-flash",
-        model="Qwen/Qwen3-8B",
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-flash",
+        #model="Qwen/Qwen3-8B",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到素水泥浆,则去掉所有含有“素水泥浆”字样的选项"},
             {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        #extra_body={"thinking": {"type": "enabled"}},
-        extra_body={"enable_thinking": True},
-        stream=True
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
     )
-    done_thinking = False
-    json_string=""
-    thinking_json_string=""
-    for chunk in completion:
-        thinking_chunk = chunk.choices[0].delta.reasoning_content
-        answer_chunk = chunk.choices[0].delta.content
-        if thinking_chunk != '':
-            thinking_json_string = thinking_json_string +  thinking_chunk
-        elif answer_chunk != '':
-            if not done_thinking:
-                done_thinking = True
-            json_string = json_string + answer_chunk
-    #json_string = completion.choices[0].message.content
-    print(thinking_json_string)
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(thinking_json_string)
     print(json_string)
     if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
         answer=[]
@@ -186,12 +191,9 @@ def aifilter1(A, #options
         if 'J' in json_string and len(A) > 9:
             answer.append(A[9])
         return answer
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为结果,请将该最终结果输出"},                       
-        ],
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
@@ -223,6 +225,7 @@ def aifilter2(A, #options
        B, #data
        aiclient,
              qwclient,
+              sfclient,
              dw):
     hit_wumian = False
     for entry in A:
@@ -240,16 +243,17 @@ def aifilter2(A, #options
     return A
 
 
-def postprocess0110(selected, data, aiclient, qwclient, label_name, name_dw):
-    ban = aifilter3(selected, data, aiclient, qwclient, name_dw)
+def postprocess0110(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
+    ban = aifilter3(selected, data, aiclient, qwclient, sfclient, name_dw)
     if ban:
-        ban2 = aifilter4(selected, data, aiclient, qwclient, name_dw)
-        if not ban2:
+        ban2 = aifilter4(selected, data, aiclient, qwclient, sfclient, name_dw)
+        if not ban2 and len([x for x in selected if '矿棉' in x or '超细玻璃棉' in x])==0:
             if '墙面' in data['mc']:
                 selected.append('第十一章  保温、隔热、防腐工程 11.1  保温、隔热工程 11.1.2  墙、柱、天棚及其它 外墙外保温  聚苯乙烯挤塑板 厚度25mm 混凝土墙面')
             else:
                 selected.append('第十一章  保温、隔热、防腐工程 11.1  保温、隔热工程 11.1.1  屋、楼地面 屋面、楼地面保温隔热 聚苯乙烯挤塑板(厚25mm)')
-    prime = aifilter1(selected, data, aiclient, qwclient, name_dw)
+    selected = list(set(selected))
+    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
     if '界面剂' in data['tz']:##保温
         if len([x for x in prime if '第十四章  墙柱面工程 14.1  一般抹灰 14.1.3  保温砂浆及抗裂基层 刷界面剂' in x]) == 0:
             prime.append('第十四章  墙柱面工程 14.1  一般抹灰 14.1.3  保温砂浆及抗裂基层 刷界面剂 混凝土面') ##需要换

+ 330 - 68
postprocess0111.py

@@ -1,4 +1,7 @@
 import time
+from config import simplemodel
+from template import xuanxiang
+from postprocess011105 import postprocess011105
 import json
 with open('zhaoping_rule', 'r') as f:
     content = f.read()
@@ -9,8 +12,17 @@ with open('name_label', 'r') as f:
 import json
 name_label=json.loads(content)
 baohuceng = ['10-74', '10-75', '10-77', '10-78', '10-80', '10-81', '10-83', '10-84', '10-86', '10-87', '10-90']
+example1='''
+1、用于地下楼梯间、走道地面 做法详见图纸设计及相关图集规范
+2、100厚C25细石混凝土随打随抹,内配Φ4@200钢筋网片,抹平压光
+'''
+example2='''
+1、用于消防泵房地面 做法详见图纸设计及相关图集规范
+2、50厚C25细石混凝土随打随抹,内配Φ4@200钢筋网片,抹平压光
+3、最薄处30厚C20细石混凝土向排水沟、集水坑找1%坡,随打随抹平,立管根部用DS M15砂浆(1:3水泥砂浆)抹小八字角
+'''
 from fallback import fallback
-def aifilter3(A, #options
+def aifilter5(A, #options
        B, #data
        aiclient,
              qwclient,
@@ -21,14 +33,60 @@ def aifilter3(A, #options
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "楼地面工程中,细石混凝土(砼)可能作为面层,也可能作为找平(找坡)层。例如,给定一段工作内容: " + example1 + "\n内容中提及细石混凝土,内配钢筋网片,这是典型的面层。所以这段文字没有包含细石混凝土找平(找坡)层"},
+            {"role": "user", "content": "再例如,给定一段工作内容: " + example2 + "\n内容中提及细石混凝土,内配钢筋网片,这是典型的面层。文字中还提及细石混凝土找1%坡,这是典型的找平(找坡)层。所以这段文字包含细石混凝土找平(找坡)层"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问该工作内容的描述中有包含混凝土找平(找坡)层吗?请回答有或者没有"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string) < 4:
+        if '没有' in json_string:
+            return False
+        return True
     completion = sfclient.chat.completions.create(
-        model="THUDM/GLM-4-9B-0414",
-        #model="glm-4.5-flash",
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个有或者没有的判断,请将该中文判断输出"},                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '没有' in json_string:
+        return False
+    return True
+def aifilter3(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
         #model="Qwen/Qwen3-8B",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "问题描述: 细石混凝土内配钢丝网片是一种常见的施工工艺。给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问工作内容的描述中有该施工工艺吗?请回答有或者没有"},
+            {"role": "user", "content": "问题描述: 细石混凝土内配钢筋网片是一种常见的施工工艺。给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问该工作内容的描述中有同时包含混凝土以及钢筋网片吗?请回答有或者没有"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": True},
@@ -41,11 +99,10 @@ def aifilter3(A, #options
             return False
         return True
     completion = sfclient.chat.completions.create(
-        #model="glm-4.5-flash",
-        model="THUDM/GLM-4-9B-0414",
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个有或者没有的判断,请将该判断输出"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个有或者没有的判断,请将该中文判断输出"},                       
         ],
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
@@ -60,47 +117,48 @@ def aifilter4(A, #options
        B, #data
        aiclient,
              qwclient,
+              sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
-    completion = qwclient.chat.completions.create(
-        #model="glm-z1-flash",
-        model="Qwen/Qwen3-14B",
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:细石混凝土楼地面不是块料面层,而是混凝土整体面层"},
             {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
             {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
             {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
             {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
-            {"role": "user", "content": " 背景知识:“混凝土楼地面”施工是面层施工,跟“楼地面涂刷一遍901胶素水泥浆”是不同的施工步骤,不得混淆"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中明确提到块料面层,比如地砖、石材块料等,则保留楼地面涂刷一遍901胶素水泥浆选项,去掉所有混凝土(砼)整体面层的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中明确提到块料面层,比如地砖、石材块料等,则去掉精确包含“细石砼整体面层”字样的选项"},
             {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        #extra_body={"thinking": {"type": "enabled"}},
-        extra_body={"enable_thinking": True},
-        stream=True
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
     )
-    done_thinking = False
-    json_string=""
-    thinking_json_string=""
-    for chunk in completion:
-        thinking_chunk = chunk.choices[0].delta.reasoning_content
-        answer_chunk = chunk.choices[0].delta.content
-        if thinking_chunk != '':
-            thinking_json_string = thinking_json_string +  thinking_chunk
-        elif answer_chunk != '':
-            if not done_thinking:
-                done_thinking = True
-            json_string = json_string + answer_chunk
-    #json_string = completion.choices[0].message.content
-    print(thinking_json_string)
+    ##done_thinking = False
+    ##json_string=""
+    ##thinking_json_string=""
+    ##for chunk in completion:
+    ##    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    ##    answer_chunk = chunk.choices[0].delta.content
+    ##    if thinking_chunk != '':
+    ##        thinking_json_string = thinking_json_string +  thinking_chunk
+    ##    elif answer_chunk != '':
+    ##        if not done_thinking:
+    ##            done_thinking = True
+    ##        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
     print(json_string)
     if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
         answer=[]
@@ -125,14 +183,214 @@ def aifilter4(A, #options
         if 'J' in json_string and len(A) > 9:
             answer.append(A[9])
         return answer
-    completion = qwclient.chat.completions.create(
-        model="ZhipuAI/GLM-4.5",
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
         #model="glm-4.5-flash",
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
+def aifilter1_2(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
+        #model="ernie-speed-128k",
         messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B]的数组作为结果,请将该最终结果输出"},                       
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 特殊处理要求一:如果工作内容描述中**没有**明确提到素水泥浆,则**去掉**所有含有“素水泥浆”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求二:如果工作内容描述中**没有**明确提到“加浆抹光”,则**去掉**所有含有“加浆抹光”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求三:如果工作内容描述中**没有**明确提到混凝土垫层,则**去掉**所有混凝土垫层的选项"},
+            {"role": "user", "content": " 特殊处理要求四:如果选项中同时存在“冷轧带肋钢筋”选项和“抗裂基层 热镀锌钢丝网”选项,则去掉热镀锌钢丝网的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项.例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
+def aifilter1_3(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到石材面刷防护剂,则去掉所有含有“石材面刷防护剂”字样的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项.例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
     json_string = completion.choices[0].message.content
@@ -163,15 +421,16 @@ def aifilter1(A, #options
        B, #data
        aiclient,
              qwclient,
+              sfclient,
              dw):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
         options.append("给定选项" + letters[i]+",内容为"+A[i] )
 
-    completion = qwclient.chat.completions.create(
-        #model="glm-z1-flash",
-        model="Qwen/Qwen3-14B",
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
         #model="ernie-speed-128k",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
@@ -183,33 +442,29 @@ def aifilter1(A, #options
             {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
             {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到踢脚线,则去掉所有踢脚线的选项"},
             {"role": "user", "content": " 特殊处理要求:去掉所有模板工程的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确涉及水泥砂浆面层,且没有提及水泥砂浆保护层,则去掉所有20mm水泥砂浆楼地面的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到素水泥浆,则去掉所有含有“素水泥浆”字样的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到“加浆抹光”,则去掉所有含有“加浆抹光”字样的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到混凝土垫层,则去掉所有混凝土垫层的选项"},
-            {"role": "user", "content": " 特殊处理要求:如果选项中同时存在“冷轧带肋钢筋”选项和“抗裂基层 热镀锌钢丝网”选项,则去掉热镀锌钢丝网的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确涉及水泥砂浆面层,且没有提及水泥砂浆保护层,则去掉所有20mm水泥砂浆楼地面面层的选项.注意,是去除面层,不包括找平层"},
             {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
             {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
-            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
-        #extra_body={"thinking": {"type": "enabled"}},
-        extra_body={"enable_thinking": True},
-        stream=True
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
     )
-    done_thinking = False
-    json_string=""
-    thinking_json_string=""
-    for chunk in completion:
-        thinking_chunk = chunk.choices[0].delta.reasoning_content
-        answer_chunk = chunk.choices[0].delta.content
-        if thinking_chunk != '':
-            thinking_json_string = thinking_json_string +  thinking_chunk
-        elif answer_chunk != '':
-            if not done_thinking:
-                done_thinking = True
-            json_string = json_string + answer_chunk
-    #json_string = completion.choices[0].message.content
-    print(thinking_json_string)
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
     print(json_string)
     if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
         answer=[]
@@ -234,13 +489,10 @@ def aifilter1(A, #options
         if 'J' in json_string and len(A) > 9:
             answer.append(A[9])
         return answer
-    completion = qwclient.chat.completions.create(
+    completion = sfclient.chat.completions.create(
         #model="glm-4.5-flash",
-        model="ZhipuAI/GLM-4.5",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B]的数组作为结果,请将该最终结果输出"},                       
-        ],
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
         #extra_body={"enable_thinking": False},
     )
@@ -290,11 +542,21 @@ def aifilter2(A, #options
 
 
 def postprocess0111(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
-    prime = aifilter1(selected, data, aiclient, qwclient, name_dw)
+    if data['bianma'].startswith('011105'):
+        return postprocess011105(selected, data, aiclient, qwclient, sfclient, label_name, name_dw)
+    zhaoping = aifilter5(selected, data, aiclient, qwclient, sfclient, name_dw)
+    if zhaoping:
+        if '第十三章  楼地面工程 13.2  找平层 13.2.2  细石混凝土 C20细石混凝土找平层 厚40mm' not in selected:
+            selected.append('第十三章  楼地面工程 13.2  找平层 13.2.2  细石混凝土 C20细石混凝土找平层 厚40mm')
+    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
+    time.sleep(1)
+    prime = aifilter1_2(prime, data, aiclient, qwclient, sfclient, name_dw)
+    time.sleep(1)
+    prime = aifilter1_3(prime, data, aiclient, qwclient, sfclient, name_dw)
     time.sleep(1)
     wangpian = aifilter3(prime, data, aiclient, qwclient, sfclient, name_dw)
     if not wangpian:
-        prime = aifilter4(prime, data, aiclient, qwclient, name_dw)
+        prime = aifilter4(prime, data, aiclient, qwclient, sfclient,name_dw)
 
     if '界面剂' in data['tz']:##保温
         if len([ x for x in prime if '第十四章  墙柱面工程 14.1  一般抹灰 14.1.3  保温砂浆及抗裂基层 刷界面剂' in x]) == 0:

+ 594 - 0
postprocess011105.py

@@ -0,0 +1,594 @@
+import time
+from config import simplemodel
+from template import xuanxiang
+import json
+with open('zhaoping_rule', 'r') as f:
+    content = f.read()
+import json
+obj=json.loads(content)
+with open('name_label', 'r') as f:
+    content = f.read()
+import json
+name_label=json.loads(content)
+tijiaoxian = ['13-95', '13-27', '13-127', '13-128', '13-129', '13-130', '13-131', '13-34', '13-72', '13-76', '13-100']
+example1='''
+1、用于地下楼梯间、走道地面 做法详见图纸设计及相关图集规范
+2、100厚C25细石混凝土随打随抹,内配Φ4@200钢筋网片,抹平压光
+'''
+example2='''
+1、用于消防泵房地面 做法详见图纸设计及相关图集规范
+2、50厚C25细石混凝土随打随抹,内配Φ4@200钢筋网片,抹平压光
+3、最薄处30厚C20细石混凝土向排水沟、集水坑找1%坡,随打随抹平,立管根部用DS M15砂浆(1:3水泥砂浆)抹小八字角
+'''
+from fallback import fallback
def aifilter5(A,  # options
              B,  # data
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Pick the skirting (踢脚线) quota entry that best matches work item ``B``.

    Candidates are the fixed module-level ``tijiaoxian`` quota codes, not
    ``A``; ``A``, ``qwclient`` and ``dw`` are accepted only for signature
    compatibility with the sibling aifilter* helpers.  Returns a list of the
    chosen entries (normally one element, possibly empty if the model answer
    contains no option letter).
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + name_label[tijiaoxian[i]]
               for i in range(len(tijiaoxian))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。该工作内容的描述中包含踢脚线施工。"+';'.join(options)+'。请选出与工作内容最匹配的一个选项并返回代号。例如,你觉得选项A最匹配,则你应该返回A'},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def decode(text):
        # Map every option letter present in *text* back to its quota entry.
        # Replaces the two duplicated 11-branch if-chains of the original.
        return [name_label[tijiaoxian[i]]
                for i, ch in enumerate(letters[:len(tijiaoxian)])
                if ch in text]

    # A very short reply is assumed to be the bare letter answer.
    if len(json_string) < 4:
        return decode(json_string)

    # Otherwise have a cheap model distill the verbose reply to one letter.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A或者B的英文字母作为答案,请将该字母答案输出"},                       
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return decode(json_string)
def aifilter3(A,  # options
              B,  # data
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Ask the LLM whether the work description mentions both concrete and a
    steel wire mesh (细石混凝土内配钢筋网片).

    Only ``B`` (keys ``label``/``mc``/``tz``), ``aiclient`` and ``sfclient``
    are used; ``A``, ``qwclient`` and ``dw`` are accepted for signature
    compatibility with the sibling aifilter* helpers.  Returns True unless
    the (distilled) answer contains 没有.
    """
    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 细石混凝土内配钢筋网片是一种常见的施工工艺。给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问该工作内容的描述中有同时包含混凝土以及钢筋网片吗?请回答有或者没有"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    if len(json_string) >= 4:
        # Verbose answer: have a cheap model distill it to 有/没有 first.
        completion = sfclient.chat.completions.create(
            model=simplemodel(),
            messages=[
                {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
                {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个有或者没有的判断,请将该中文判断输出"},                       
            ],
            extra_body={"thinking": {"type": "disabled"}},
        )
        json_string = completion.choices[0].message.content
        print(json_string)
    return '没有' not in json_string
+
def aifilter4(A,  # options
              B,  # data
              aiclient,
              qwclient,
              sfclient,
              dw):
    """Drop 细石砼整体面层 options when the work description names a tile or
    stone block finish layer, by asking the LLM to apply the stated rule.

    ``A`` is the list of candidate option strings, ``B`` the work-item dict
    (keys ``label``/``mc``/``tz``); ``qwclient`` and ``dw`` are unused and
    kept for signature compatibility.  Returns the filtered subset of ``A``.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:细石混凝土楼地面不是块料面层,而是混凝土整体面层"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中明确提到块料面层,比如地砖、石材块料等,则去掉精确包含“细石砼整体面层”字样的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def decode(text):
        # Map every kept option letter back to its option string.  Unlike the
        # original per-letter if-chain this also covers letters K-N.
        return [A[i] for i, ch in enumerate(letters[:len(A)]) if ch in text]

    # Heuristic: an answer that is (nearly) pure "[A,B,...]" decodes directly.
    noise = [ch for ch in json_string
             if ch not in ',[] ' and (ch < 'A' or ch > 'M')]
    if len(noise) < 5:
        return decode(json_string)

    # Otherwise have a cheap model extract the final [A,B,...] array first.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return decode(json_string)
def aifilter1_2(A,  # options
                B,  # data
                aiclient,
                qwclient,
                sfclient,
                dw):
    """Apply removal rules 一-四 (素水泥浆 / 加浆抹光 / 混凝土垫层 / 热镀锌钢丝网)
    to the candidate options via the LLM.

    ``A`` is the list of candidate option strings, ``B`` the work-item dict
    (keys ``label``/``mc``/``tz``); ``qwclient`` and ``dw`` are unused and
    kept for signature compatibility.  Returns the filtered subset of ``A``.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 特殊处理要求一:如果工作内容描述中**没有**明确提到素水泥浆,则**去掉**所有含有“素水泥浆”字样的选项"},
            {"role": "user", "content": " 特殊处理要求二:如果工作内容描述中**没有**明确提到“加浆抹光”,则**去掉**所有含有“加浆抹光”字样的选项"},
            {"role": "user", "content": " 特殊处理要求三:如果工作内容描述中**没有**明确提到混凝土垫层,则**去掉**所有混凝土垫层的选项"},
            {"role": "user", "content": " 特殊处理要求四:如果选项中同时存在“冷轧带肋钢筋”选项和“抗裂基层 热镀锌钢丝网”选项,则去掉热镀锌钢丝网的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项.例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def decode(text):
        # Map every kept option letter back to its option string.  Unlike the
        # original per-letter if-chain this also covers letters K-N.
        return [A[i] for i, ch in enumerate(letters[:len(A)]) if ch in text]

    # Heuristic: an answer that is (nearly) pure "[A,B,...]" decodes directly.
    noise = [ch for ch in json_string
             if ch not in ',[] ' and (ch < 'A' or ch > 'M')]
    if len(noise) < 5:
        return decode(json_string)

    # Otherwise have a cheap model extract the final [A,B,...] array first.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return decode(json_string)
def aifilter1_3(A,  # options
                B,  # data
                aiclient,
                qwclient,
                sfclient,
                dw):
    """Drop 石材面刷防护剂 options unless the work description mentions that
    treatment, via the LLM.

    ``A`` is the list of candidate option strings, ``B`` the work-item dict
    (keys ``label``/``mc``/``tz``); ``qwclient`` and ``dw`` are unused and
    kept for signature compatibility.  Returns the filtered subset of ``A``.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到石材面刷防护剂,则去掉所有含有“石材面刷防护剂”字样的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项.例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def decode(text):
        # Map every kept option letter back to its option string.  Unlike the
        # original per-letter if-chain this also covers letters K-N.
        return [A[i] for i, ch in enumerate(letters[:len(A)]) if ch in text]

    # Heuristic: an answer that is (nearly) pure "[A,B,...]" decodes directly.
    noise = [ch for ch in json_string
             if ch not in ',[] ' and (ch < 'A' or ch > 'M')]
    if len(noise) < 5:
        return decode(json_string)

    # Otherwise have a cheap model extract the final [A,B,...] array first.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return decode(json_string)
def aifilter1(A,  # options
              B,  # data
              aiclient,
              qwclient,
              sfclient,
              dw):
    """First-pass option filter: drop formwork (模板工程) options and, when the
    work description mentions neither a mortar finish nor a mortar protective
    layer, drop 20mm mortar floor finish options — all via the LLM.

    ``A`` is the list of candidate option strings, ``B`` the work-item dict
    (keys ``label``/``mc``/``tz``); ``qwclient`` and ``dw`` are unused and
    kept for signature compatibility.  Returns the filtered subset of ``A``.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
            {"role": "user", "content": " 特殊处理要求:去掉所有模板工程的选项"},
            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确涉及水泥砂浆面层,且没有提及水泥砂浆保护层,则去掉所有20mm水泥砂浆楼地面面层的选项.注意,是去除面层,不包括找平层"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def decode(text):
        # Map every kept option letter back to its option string.  Unlike the
        # original per-letter if-chain this also covers letters K-N.
        return [A[i] for i, ch in enumerate(letters[:len(A)]) if ch in text]

    # Heuristic: an answer that is (nearly) pure "[A,B,...]" decodes directly.
    noise = [ch for ch in json_string
             if ch not in ',[] ' and (ch < 'A' or ch > 'M')]
    if len(noise) < 5:
        return decode(json_string)

    # Otherwise have a cheap model extract the final [A,B,...] array first.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return decode(json_string)
def aifilter2(A,  # options
              B,  # data
              aiclient,
              qwclient,
              dw):
    """Resolve roof-vs-floor surface conflicts among the options.

    If ``A`` contains at least one entry from the rule table's 'wumian'
    (roof-surface) group and one from its 'loumian' (floor-surface) group,
    the loumian entry is dropped and the rest of ``A`` returned unchanged;
    otherwise ``A`` is returned as-is.  ``B``, ``aiclient``, ``qwclient``
    and ``dw`` are unused, kept for signature compatibility.

    NOTE(review): when several distinct loumian entries match, only the
    last one encountered is removed — confirm that is intended.
    """
    wumian_hits = [entry for entry in A if entry in obj['wumian']]
    loumian_hits = [entry for entry in A if entry in obj['loumian']]
    if not (wumian_hits and loumian_hits):
        return A
    doomed = loumian_hits[-1]
    return [entry for entry in A if entry != doomed]
+
+
def postprocess011105(selected, data, aiclient, qwclient, sfclient, label_name, name_dw):
    """Post-process the candidate quota entries for billing code 011105.

    Runs the three LLM option filters in sequence (with a short pause
    between calls), replaces any generic skirting (踢脚线) options with the
    single best-matching skirting entry chosen from the full candidate set,
    and ensures the primer-coat (刷界面剂) entry is present when the feature
    text mentions 界面剂.  Returns the resulting option list.
    """
    filtered = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
    time.sleep(1)
    filtered = aifilter1_2(filtered, data, aiclient, qwclient, sfclient, name_dw)
    time.sleep(1)
    filtered = aifilter1_3(filtered, data, aiclient, qwclient, sfclient, name_dw)
    time.sleep(1)
    filtered = [entry for entry in filtered if '踢脚线' not in entry]
    filtered = filtered + aifilter5(selected, data, aiclient, qwclient, sfclient, name_dw)
    if '界面剂' in data['tz']:  # insulation work
        has_primer = any(
            '第十四章  墙柱面工程 14.1  一般抹灰 14.1.3  保温砂浆及抗裂基层 刷界面剂' in entry
            for entry in filtered
        )
        if not has_primer:
            # placeholder entry, to be replaced
            filtered.append('第十四章  墙柱面工程 14.1  一般抹灰 14.1.3  保温砂浆及抗裂基层 刷界面剂 混凝土面')
    return filtered

+ 487 - 0
postprocess0112.py

@@ -0,0 +1,487 @@
+import time
+from fallback import fallback
+from config import simplemodel
+from template import xuanxiang
+import json
+with open('zhaoping_rule', 'r') as f:
+    content = f.read()
+import json
+obj=json.loads(content)
+with open('name_label', 'r') as f:
+    content = f.read()
+import json
+name_label=json.loads(content)
+baohuceng = ['10-74', '10-75', '10-77', '10-78', '10-80', '10-81', '10-83', '10-84', '10-86', '10-87', '10-90']
+pair=[
+        [name_label['14-1'],name_label['14-2']],
+        [name_label['14-8'],name_label['14-10']],
+        [name_label['14-9'],name_label['14-11']],
+        [name_label['14-25'],name_label['14-26']],
+        [name_label['14-37'],name_label['14-39']],
+        [name_label['14-38'],name_label['14-40']],
+        [name_label['14-48'],name_label['14-49']],
+        [name_label['14-50'],name_label['14-52']],
+        [name_label['14-51'],name_label['14-53']],
+        
+        ]
+from fallback import fallback
+def aifilter5(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": "问题描述: " ",".join(options) + "。请问选项中是否有龙骨选项?" + '''
+                 如果有,请回答
+                 {
+                    'answer': '有'
+                 }
+                 如果没有,请回答
+                 {
+                    'answer': '没有'
+                 }
+             '''
+            },
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string.replace(" ", "")) < 10:
+        if '没有' in json_string:
+            return False
+        return True
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“有”或者“没有”的判断,请将该中文判断输出" + '''
+                 如果有,请回答
+                 {
+                    'answer': '有'
+                 }
+                 如果没有,请回答
+                 {
+                    'answer': '没有'
+                 }
+                 你只需要输出结果,不要输出分析过程
+                 '''
+
+             },                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '没有' in json_string:
+        return False
+    return True
+def aifilter6(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问该工作内容的描述中有提及混凝土墙、柱面免除抹灰吗?" + '''
+                 如果有提及,请回答
+                 {
+                    'answer': '有'
+                 }
+                 如果没有提及,请回答
+                 {
+                    'answer': '没有'
+                 }
+             '''
+            },
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string.replace(" ", "")) < 10:
+        if '没有' in json_string:
+            return False
+        return True
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“有”或者“没有”的判断,请将该中文判断输出" + '''
+                 如果有,请回答
+                 {
+                    'answer': '有'
+                 }
+                 如果没有,请回答
+                 {
+                    'answer': '没有'
+                 }
+                 你只需要输出结果,不要输出分析过程
+                 '''
+
+             },                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '没有' in json_string:
+        return False
+    return True
+def aifilter3(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": "问题描述: 墙面装饰工程可分为外墙装饰或者内墙装饰。给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问该工作内容的描述指的是内墙还是外墙?" + '''
+                 如果是外墙,请回答
+                 {
+                    'answer': '外墙'
+                 }
+                 如果是内墙,请回答
+                 {
+                    'answer': '内墙'
+                 }
+                 如果无法确定,请回答
+                 {
+                    'answer': '不确定'
+                 }
+             '''
+            },
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string.replace(" ", "")) < 10:
+        if '外墙' in json_string:
+            return False
+        return True
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“外墙”或者“内墙”的判断,请将该中文判断输出" + '''
+                 如果是外墙,请回答
+                 {
+                    'answer': '外墙'
+                 }
+                 如果是内墙,请回答
+                 {
+                    'answer': '内墙'
+                 }
+                 如果无法确定,请回答
+                 {
+                    'answer': '不确定'
+                 }
+                 你只需要输出结果,不要输出分析过程
+                 '''
+
+             },                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '外墙' in json_string:
+        return False
+    return True
+
+def aifilter4(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求一:如果工作内容没有提及界面剂,则去掉所有含有“界面剂”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求二:如果选项中既有墙柱面一般抹灰的选项(夹板基层不属于一般抹灰,龙骨不属于一般抹灰,刷界面剂不属于一般抹灰),又有镶贴块料面层及幕墙的选项,则去掉墙柱面一般抹灰的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        #model="glm-4.5-flash",
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
+def aifilter1(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 特殊处理要求一:去掉所有刷素水泥浆的选项"},
+            {"role": "user", "content": " 特殊处理要求二:去掉所有精确含有“天棚工程”四个字的选项,不得删除含有“天棚及其他”五个字的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        #model="glm-4.5-flash",
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
+def aifilter2(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+             dw):
+    hit_wumian = False
+    for entry in A:
+        if entry in obj['wumian']:
+            hit_wumian=True
+
+    hit_loumian = False
+    loumian_entry = ''
+    for entry in A:
+        if entry in obj['loumian']:
+            hit_loumian=True
+            loumian_entry = entry
+    if hit_wumian and hit_loumian:
+        return [x for x in A if x != loumian_entry]
+    return A
+
+
+def postprocess0112(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
+    """Post-process chapter-14 (wall/column face) option selections.
+
+    Applies rule-based substitutions and LLM-backed filters to `selected`,
+    falling back to a fresh pick from `candidates` when everything is
+    filtered away.  Returns the final option list.  label_name and
+    qwclient are passed through to helpers / kept for uniformity.
+    """
+    # Remember the last selected plaster-variant label pair[i][1] together
+    # with its counterpart pair[i][0] (see module-level `pair`).
+    hit=[]
+    hit_pair=[]
+    for entry in selected:
+        for p in pair:
+            if p[1] == entry:
+                hit = [entry]
+                hit_pair = p
+    if len(hit)>0:
+        # aifilter6: does the spec say concrete faces are exempt from
+        # plastering?  If so, swap in the paired variant.
+        mian = aifilter6(selected, data, aiclient, qwclient, sfclient, name_dw)
+        if mian:
+            selected = [x for x in selected if not x == hit[0]]
+            selected.append(hit_pair[0])
+    # Hard blacklist: wood-floor and iron-part items never belong here.
+    selected = [x for x in selected if '木地板' not in x]
+    selected = [x for x in selected if '铁件安装' not in x]
+    selected = [x for x in selected if '铁件制作' not in x]
+    # Waterproof mortar / damp-proof course in the spec -> ensure the
+    # standard vertical waterproof-mortar item is present.
+    if '防水砂浆' in data['tz'] or '防潮层' in data['tz']:
+        if '第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.2  防水砂浆 (防水砂浆 1:2)防水砂浆 立面' not in selected:
+            selected.append('第十章  屋面及防水工程 10.2  平面立面及其它防水 10.2.2  防水砂浆 (防水砂浆 1:2)防水砂浆 立面')
+    # Interior walls must not carry exterior glazed-tile items; replace
+    # them with the interior wall-tile item.
+    neiqiang = aifilter3(selected, data, aiclient, qwclient, sfclient, name_dw)
+    if neiqiang and len([x for x in selected if '外墙釉面砖' in x]) > 0:
+        selected = [x for x in selected if not '外墙釉面砖' in x]
+        selected.append('第十四章  墙柱面工程 14.3  镶贴块料面层及幕墙 14.3.1  瓷砖 单块面积0.18m2以内墙砖 砂浆粘贴 墙面')
+    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
+    prime = aifilter4(prime, data, aiclient, qwclient, sfclient, name_dw)
+    # Steel-frame/keel items measured in m2: if no keel option remains
+    # among the others, substitute the generic label name_label['14-180']
+    # and drop the frame fabrication/installation items.
+    if len([x for x in prime if '钢骨架安装' in x or '钢骨架制作' in x or '钉在龙骨上' in x]) > 0:
+        if data['dw'] == 'm2':
+            t = [x for x in prime if '钢骨架安装' not in x and '钢骨架制作' not in x and '钉在龙骨上' not in x]
+            longgu = aifilter5(t, data, aiclient, qwclient, sfclient, name_dw)
+            if not longgu:
+                prime = prime + [name_label['14-180']]
+                prime = [x for x in prime if '钢骨架安装' not in x and '钢骨架制作' not in x]
+
+    if len(prime) == 0:
+        # Everything was filtered away: re-select from the raw candidates.
+        selected = fallback(candidates, data, aiclient, qwclient, sfclient, None, None)
+        return selected
+    # Interface agent in the spec -> guarantee a 界面剂 option is present.
+    if '界面剂' in data['tz']:
+        if len([x for x in prime if '界面剂' in x])==0:
+            prime.append('第十四章  墙柱面工程 14.1  一般抹灰 14.1.3  保温砂浆及抗裂基层 刷界面剂 混凝土面')
+    return prime

+ 578 - 0
postprocess0113.py

@@ -0,0 +1,578 @@
+import time
+from fallback import fallback
+from config import simplemodel
+from template import xuanxiang
+import json
+with open('zhaoping_rule', 'r') as f:
+    content = f.read()
+import json
+obj=json.loads(content)
+with open('name_label', 'r') as f:
+    content = f.read()
+import json
+name_label=json.loads(content)
+baohuceng = ['10-74', '10-75', '10-77', '10-78', '10-80', '10-81', '10-83', '10-84', '10-86', '10-87', '10-90']
+from fallback import fallback
+def aifilter5(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": "问题描述: " ",".join(options) + "。请问选项中是否有龙骨选项?" + '''
+                 如果有,请回答
+                 {
+                    'answer': '有'
+                 }
+                 如果没有,请回答
+                 {
+                    'answer': '没有'
+                 }
+             '''
+            },
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string.replace(" ", "")) < 10:
+        if '没有' in json_string:
+            return False
+        return True
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“有”或者“没有”的判断,请将该中文判断输出" + '''
+                 如果有,请回答
+                 {
+                    'answer': '有'
+                 }
+                 如果没有,请回答
+                 {
+                    'answer': '没有'
+                 }
+                 你只需要输出结果,不要输出分析过程
+                 '''
+
+             },                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '没有' in json_string:
+        return False
+    return True
+def aifilter3(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        #model="THUDM/GLM-4-9B-0414",
+        model="glm-4.5-air",
+        #model="Qwen/Qwen3-8B",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": "问题描述: 墙面装饰工程可分为外墙装饰或者内墙装饰。给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] + "。请问该工作内容的描述指的是内墙还是外墙?" + '''
+                 如果是外墙,请回答
+                 {
+                    'answer': '外墙'
+                 }
+                 如果是内墙,请回答
+                 {
+                    'answer': '内墙'
+                 }
+                 如果无法确定,请回答
+                 {
+                    'answer': '不确定'
+                 }
+             '''
+            },
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string.replace(" ", "")) < 10:
+        if '外墙' in json_string:
+            return False
+        return True
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个“外墙”或者“内墙”的判断,请将该中文判断输出" + '''
+                 如果是外墙,请回答
+                 {
+                    'answer': '外墙'
+                 }
+                 如果是内墙,请回答
+                 {
+                    'answer': '内墙'
+                 }
+                 如果无法确定,请回答
+                 {
+                    'answer': '不确定'
+                 }
+                 你只需要输出结果,不要输出分析过程
+                 '''
+
+             },                       
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if '外墙' in json_string:
+        return False
+    return True
+
+def aifilter1_2(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到素水泥浆,则去掉所有含有“素水泥浆”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到“加浆抹光”,则去掉所有含有“加浆抹光”字样的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到混凝土垫层,则去掉所有混凝土垫层的选项"},
+            {"role": "user", "content": " 特殊处理要求:如果选项中同时存在“冷轧带肋钢筋”选项和“抗裂基层 热镀锌钢丝网”选项,则去掉热镀锌钢丝网的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项.例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
+def aifilter1_3(A, #options
+       B, #data
+       aiclient,
+             qwclient,
+              sfclient,
+             dw):
+    options=[]
+    letters = "ABCDEFGHIJKLMN"
+    for i in range(len(A)):
+        options.append("给定选项" + letters[i]+",内容为"+A[i] )
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        #model="THUDM/GLM-Z1-9B-0414",
+        #model="ernie-speed-128k",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 背景知识:混凝土楼地面是面层,跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土找平层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)整体面层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:混凝土(砼)找平层跟混凝土垫层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆面层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 背景知识:水泥砂浆找平层跟水泥砂浆保护层是不同的施工步骤,不得混淆"},
+            {"role": "user", "content": " 特殊处理要求:如果工作内容描述中没有明确提到石材面刷防护剂,则去掉所有含有“石材面刷防护剂”字样的选项"},
+            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
+            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +  ",".join(options) + "。请根据处理要求做出处理,并返回结果,删除选项必须对应到明确的特殊处理要求,不要擅自删除选项.例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": True},
+        #stream=True
+    )
+    #done_thinking = False
+    #json_string=""
+    #thinking_json_string=""
+    #for chunk in completion:
+    #    thinking_chunk = chunk.choices[0].delta.reasoning_content
+    #    answer_chunk = chunk.choices[0].delta.content
+    #    if thinking_chunk != '':
+    #        thinking_json_string = thinking_json_string +  thinking_chunk
+    #    elif answer_chunk != '':
+    #        if not done_thinking:
+    #            done_thinking = True
+    #        json_string = json_string + answer_chunk
+    json_string = completion.choices[0].message.content
+    #print(completion.choices[0].message.reasoning_content)
+    print(json_string)
+    if len([x for x in json_string if x != ',' and x != '[' and x != ']' and x != ' ' and (x < 'A' or x > 'M')]) < 5:
+        answer=[]
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        if 'H' in json_string and len(A) > 7:
+            answer.append(A[7])
+        if 'I' in json_string and len(A) > 8:
+            answer.append(A[8])
+        if 'J' in json_string and len(A) > 9:
+            answer.append(A[9])
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
+        extra_body={"thinking": {"type": "disabled"}},
+        #extra_body={"enable_thinking": False},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer=[]
+    if 'A' in json_string and len(A) > 0:
+        answer.append(A[0])
+    if 'B' in json_string and len(A) > 1:
+        answer.append(A[1])
+    if 'C' in json_string and len(A) > 2:
+        answer.append(A[2])
+    if 'D' in json_string and len(A) > 3:
+        answer.append(A[3])
+    if 'E' in json_string and len(A) > 4:
+        answer.append(A[4])
+    if 'F' in json_string and len(A) > 5:
+        answer.append(A[5])
+    if 'G' in json_string and len(A) > 6:
+        answer.append(A[6])
+    if 'H' in json_string and len(A) > 7:
+        answer.append(A[7])
+    if 'I' in json_string and len(A) > 8:
+        answer.append(A[8])
+    if 'J' in json_string and len(A) > 9:
+        answer.append(A[9])
+    return answer
def aifilter4(A, B, aiclient, qwclient, sfclient, dw):
    """Drop options mentioning "界面剂" (interface agent) when the work
    description does not, by asking an LLM to apply that single rule.

    A: list of candidate option strings (at most ~14 are lettered).
    B: dict with 'label', 'mc' and 'tz' text fields describing the work.
    aiclient / sfclient: OpenAI-style chat clients; qwclient and dw are unused.
    Returns the surviving subset of A, in original order.

    Fixed defect: ten copy-pasted letter-to-option if-blocks, duplicated
    twice, replaced by one table-driven helper.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求:如果工作内容没有提及界面剂,则去掉所有含有“界面剂”字样的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _options_from_letters(text):
        # Map each letter A..J present in *text* back to its option in A.
        return [A[i] for i, ch in enumerate("ABCDEFGHIJ")
                if i < len(A) and ch in text]

    # Characters other than separators and the letters A..M count as "noise";
    # a near-noise-free reply is already in the compact [A,B,...] form.
    noise = [x for x in json_string if x not in ',[] ' and (x < 'A' or x > 'M')]
    if len(noise) < 5:
        return _options_from_letters(json_string)

    # Verbose reply: ask a lightweight model to condense it to bare letters.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return _options_from_letters(json_string)
def aifilter1(A, B, aiclient, qwclient, sfclient, dw):
    """Drop options matching two fixed rules — exact "刷(喷)浆" wording, and
    plain cement-slurry ("素水泥浆") brushing — via an LLM.

    A: list of candidate option strings.
    B: dict with 'label', 'mc' and 'tz' text fields describing the work.
    aiclient / sfclient: OpenAI-style chat clients; qwclient and dw are unused.
    Returns the surviving subset of A, in original order.

    Fixed defect: ten copy-pasted letter-to-option if-blocks, duplicated
    twice, replaced by one table-driven helper.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-z1-flash",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求一:去掉所有精确含有“刷(喷)浆”字样的选项"},
            {"role": "user", "content": " 特殊处理要求二:去掉所有刷素水泥浆的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _options_from_letters(text):
        # Map each letter A..J present in *text* back to its option in A.
        return [A[i] for i, ch in enumerate("ABCDEFGHIJ")
                if i < len(A) and ch in text]

    # Characters other than separators and the letters A..M count as "noise";
    # a near-noise-free reply is already in the compact [A,B,...] form.
    noise = [x for x in json_string if x not in ',[] ' and (x < 'A' or x > 'M')]
    if len(noise) < 5:
        return _options_from_letters(json_string)

    # Verbose reply: ask a lightweight model to condense it to bare letters.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return _options_from_letters(json_string)
def aifilter2(A, B, aiclient, qwclient, dw):
    """Rule-based filter: when the candidate list contains both a 'wumian'
    entry and a 'loumian' entry (per the module-level ``obj`` rule tables —
    presumably roof vs. floor items, confirm against 'zhaoping_rule'), drop
    the last-seen 'loumian' entry; otherwise return A unchanged.

    The LLM clients (aiclient, qwclient) and dw are accepted for interface
    parity with the other aifilter* helpers but are not used here.
    """
    hit_wumian = any(entry in obj['wumian'] for entry in A)

    hit_loumian = False
    loumian_entry = ''
    for entry in A:
        if entry in obj['loumian']:
            hit_loumian = True
            loumian_entry = entry  # last match wins

    if hit_wumian and hit_loumian:
        return [entry for entry in A if entry != loumian_entry]
    return A
+
+
def postprocess0113(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Run the LLM filter over *selected*; if everything is filtered out,
    fall back to re-selecting from the full candidate list."""
    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
    if not prime:
        return fallback(candidates, data, aiclient, qwclient, sfclient, None, None)
    return prime

+ 414 - 0
postprocess0114.py

@@ -0,0 +1,414 @@
# --- module setup for postprocess0114 ---
import time
from fallback import fallback
from config import simplemodel
from template import xuanxiang
import json
# Load the label-code -> label-text mapping used by the handle_* helpers below.
with open('name_label', 'r') as f:
    content = f.read()
name_label=json.loads(content)
from fallback import fallback  # NOTE(review): duplicate of the import above
def aifilter1(A, B, aiclient, qwclient, sfclient, dw):
    """Drop options per three fixed exterior/interior-wall rules (外墙涂料 on
    interior walls, unmentioned 抗裂腻子, unmentioned 批腻子) via an LLM.

    A: list of candidate option strings.
    B: dict with 'label', 'mc' and 'tz' text fields describing the work.
    aiclient / sfclient: OpenAI-style chat clients; qwclient and dw are unused.
    Returns the surviving subset of A, in original order.

    Fixed defect: ten copy-pasted letter-to-option if-blocks, duplicated
    twice, replaced by one table-driven helper.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求一:如果工作内容提及的是内墙,则去掉所有精确含有“外墙涂料”字样的选项"},
            {"role": "user", "content": " 特殊处理要求二:如果工作内容没有明确提及抗裂腻子,则去掉所有精确含有“抗裂腻子”字样的选项"},
            {"role": "user", "content": " 特殊处理要求三:如果工作内容没有明确提及批腻子,则去掉所有精确含有“外墙批抗裂腻子”字样的选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _options_from_letters(text):
        # Map each letter A..J present in *text* back to its option in A.
        return [A[i] for i, ch in enumerate("ABCDEFGHIJ")
                if i < len(A) and ch in text]

    # Characters other than separators and the letters A..M count as "noise";
    # a near-noise-free reply is already in the compact [A,B,...] form.
    noise = [x for x in json_string if x not in ',[] ' and (x < 'A' or x > 'M')]
    if len(noise) < 5:
        return _options_from_letters(json_string)

    # Verbose reply: ask a lightweight model to condense it to bare letters.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return _options_from_letters(json_string)
+
def handle_nizi(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Classify the putty ("腻子") situation of the work description.

    Asks the LLM to choose among five options (no putty / putty mentioned /
    insulation putty / 2 coats / 3 coats) and returns the chosen letter
    'A'..'E'. Only data, aiclient and sfclient are actually used.

    Fixed defect: the five-way letter scan was copy-pasted twice; it is now
    a single helper (last matching letter wins, as before).
    """
    letters = "ABCDEFGHIJKLMN"
    options = ['没有提及腻子', '提及腻子', '提及保温腻子', '腻子2道(遍)', '腻子3道(遍)']
    options_ = ["给定选项" + letters[i] + ",内容为" + options[i] for i in range(len(options))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ",".join(options_) + "。请选出与工作内容最匹配的腻子选项。请输出A、B这样的字母作为答案"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _pick(text):
        # Default 'A'; later letters override earlier ones (original order).
        answer = 'A'
        for ch in "BCDE":
            if ch in text:
                answer = ch
        return answer

    if len(json_string) < 5:
        # Reply is already a bare letter.
        return _pick(json_string)

    # Verbose reply: ask a lightweight model to extract the letter.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=[
            {"role": "system", "content": "You are a helpful assistant.请将结果以JSON格式输出"},
            {"role": "user", "content": "问题描述: 给定一段内容: " + json_string + "。文字中给出了一个类似于A、B的字母作为答案,请输出这个答案。不需要输出分析过程"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return _pick(json_string)
def handle_neiqiang(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Pick the interior-wall paint label best matching the work description.

    Asks the LLM to choose among seven paint types (defaulting to option B)
    and maps the chosen letter to its label code via the module-level
    ``name_label`` table. Returns a single-element list of label text.

    Fixed defects: the seven-way letter scan was copy-pasted twice and the
    final seven-way return chain was a ladder of ifs; both are now
    table-driven. Behavior is unchanged (answer is always in 'A'..'G').
    """
    letters = "ABCDEFGHIJKLMN"
    options = [
        '调和漆',
        '乳胶漆(水性水泥漆)',
        '砂胶喷涂',
        '多彩涂料喷涂',  # full package per original note
        '浮雕喷涂料',  # full package per original note
        '喷刷白水泥浆、石灰浆、石灰大白浆',  # full package per original note
        '水性防霉涂料',
    ]
    options_ = ["给定选项" + letters[i] + ",内容为" + options[i] for i in range(len(options))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ",".join(options_) + "。请选出与工作内容最匹配的涂料选项。如果没有特别匹配的选项,则默认选择选项B。请给出你的选择,请输出A、B这样的字母作为答案"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _pick(text):
        # Default 'A'; later letters override earlier ones (original order).
        answer = 'A'
        for ch in "BCDEFG":
            if ch in text:
                answer = ch
        return answer

    if len(json_string) < 5:
        answer = _pick(json_string)
    else:
        # Verbose reply: ask a lightweight model to extract the letter.
        completion = sfclient.chat.completions.create(
            model=simplemodel(),
            messages=[
                {"role": "system", "content": "You are a helpful assistant.请将结果以JSON格式输出"},
                {"role": "user", "content": "问题描述: 给定一段内容: " + json_string + "。文字中给出了一个类似于A、B的字母作为答案,请输出这个答案。不需要输出分析过程"},
            ],
            extra_body={"thinking": {"type": "disabled"}},
        )
        json_string = completion.choices[0].message.content
        print(json_string)
        answer = _pick(json_string)

    label_code = {
        'A': '17-160',
        'B': '17-177',
        'C': '17-205',
        'D': '17-210',
        'E': '17-220',
        'F': '17-224',
        'G': '17-CB4',  # full putty skim-coat ("满刮腻子") per original note
    }[answer]
    return [name_label[label_code]]
def handle_waiqiang(nizi, selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Pick the exterior-wall ("外墙") coating label best matching the work.

    nizi: putty labels already chosen by the caller; appended to the result
    for every coating except option E (putty only, no paint — per the
    original per-branch returns). Returns a list of label texts via the
    module-level ``name_label`` table.

    Fixed defects: the nine-way letter scan was copy-pasted twice and the
    nine-way return chain was a ladder of ifs; both are now table-driven.
    Behavior is unchanged (answer is always in 'A'..'I').
    """
    letters = "ABCDEFGHIJKLMN"
    options = [
        '外墙丙烯酸弹性乳胶漆',  # putty not included, two coats (original note)
        '外墙苯丙乳胶漆',  # putty not included, two coats (original note)
        '外墙溶剂涂料',  # full package (original note)
        '外墙弹性涂料',  # two putty coats, one base + two finish (original note)
        '外墙批抗裂腻子(不刷涂料)',
        '外墙彩砂喷涂',  # two putty coats, two finish spray coats (original note)
        '喷涂外墙乳液型涂料',  # full package (original note)
        '外墙真石漆',  # full package (original note)
        '浮雕喷涂料外墙',  # full package (original note)
    ]
    options_ = ["给定选项" + letters[i] + ",内容为" + options[i] for i in range(len(options))]

    completion = aiclient.chat.completions.create(
        model="glm-4.5-air",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ",".join(options_) + "。请选出与工作内容最匹配的外墙涂料选项。如果工作内容只提及外墙涂料,没有更细节的介绍,则选择选项D。请给出你的选择,请输出A、B这样的字母作为答案"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _pick(text):
        # Default 'A'; later letters override earlier ones (original order).
        answer = 'A'
        for ch in "BCDEFGHI":
            if ch in text:
                answer = ch
        return answer

    if len(json_string) < 5:
        answer = _pick(json_string)
    else:
        # Verbose reply: ask a lightweight model to extract the letter.
        completion = sfclient.chat.completions.create(
            model=simplemodel(),
            messages=[
                {"role": "system", "content": "You are a helpful assistant.请将结果以JSON格式输出"},
                {"role": "user", "content": "问题描述: 给定一段内容: " + json_string + "。文字中给出了一个类似于A、B的字母作为答案,请输出这个答案。不需要输出分析过程"},
            ],
            extra_body={"thinking": {"type": "disabled"}},
        )
        json_string = completion.choices[0].message.content
        print(json_string)
        answer = _pick(json_string)

    label_code = {
        'A': 'NT17-补22',
        'B': '17-192',
        'C': '17-199',
        'D': '17-197',
        'E': '17-195',
        'F': '17-202',
        'G': '17-207',
        'H': '17-218',
        'I': '17-222',
    }[answer]
    base = [name_label[label_code]]
    if answer == 'E':
        # Option E is putty without paint: do not append the putty labels.
        return base
    return base + nizi
def handle_mohui(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Dispatch plaster-related work: interior walls go straight to the
    interior-paint chooser; exterior walls first resolve the putty choice,
    map it to putty labels, then choose the exterior coating."""
    if '外墙' not in data['tz']:
        return handle_neiqiang(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
    nizi = handle_nizi(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
    # Putty letter -> label list; choices outside C/D/E contribute nothing.
    putty_labels = {
        'C': [name_label['14-CB1(1)']],
        'D': [name_label['17-164']],
        'E': [name_label['17-164']],
    }
    result = putty_labels.get(nizi, [])
    return handle_waiqiang(result, selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
def postprocess0114(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Route to the plaster handler when '17.1.3' entries dominate the
    selection; otherwise run the LLM filter, falling back to the full
    candidate list if the filter empties the selection."""
    mu = sum(1 for entry in selected if '17.1.1' in entry)
    jinshu = sum(1 for entry in selected if '17.1.2' in entry)
    mohui = sum(1 for entry in selected if '17.1.3' in entry)
    if mohui > mu and mohui > jinshu:
        return handle_mohui(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates)
    prime = aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)
    if not prime:
        return fallback(candidates, data, aiclient, qwclient, sfclient, None, None)
    return prime

+ 116 - 0
postprocess0115.py

@@ -0,0 +1,116 @@
# --- module setup for postprocess0115 ---
import time
from fallback import fallback
from config import simplemodel
from template import xuanxiang
import json
# Rule tables read from 'zhaoping_rule'; schema not visible here — confirm.
with open('zhaoping_rule', 'r') as f:
    content = f.read()
import json  # NOTE(review): duplicate of the import above
obj=json.loads(content)
# Label-code -> label-text mapping.
with open('name_label', 'r') as f:
    content = f.read()
import json  # NOTE(review): duplicate of the import above
name_label=json.loads(content)
# Protective-layer label codes; defined here but unused in this module view.
baohuceng = ['10-74', '10-75', '10-77', '10-78', '10-80', '10-81', '10-83', '10-84', '10-86', '10-87', '10-90']
from fallback import fallback  # NOTE(review): duplicate of the import above
+
def aifilter1(A, B, aiclient, qwclient, sfclient, dw):
    """Drop curtain-wall ("幕墙") options when the work description does not
    mention a curtain wall, by asking an LLM to apply that single rule.

    A: list of candidate option strings.
    B: dict with 'label', 'mc' and 'tz' text fields describing the work.
    aiclient / sfclient: OpenAI-style chat clients; qwclient and dw are unused.
    Returns the surviving subset of A, in original order.

    Fixed defect: ten copy-pasted letter-to-option if-blocks, duplicated
    twice, replaced by one table-driven helper.
    """
    letters = "ABCDEFGHIJKLMN"
    options = ["给定选项" + letters[i] + ",内容为" + A[i] for i in range(len(A))]

    completion = aiclient.chat.completions.create(
        model="glm-z1-flash",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": " 特殊处理要求一:如果工作内容没有提及幕墙,则去掉所有幕墙选项"},
            {"role": "user", "content": " 重要提示:选项指的是给定的A、B、C之类的选项,不是指的工作内容中的可能的1、2、3这样罗列的特征"},
            {"role": "user", "content": " 重要提示:除特殊处理要求提及的内容外,不需考虑选项内容与工作内容是否符合,只需要根据特殊处理要求做出处理"},
            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " + B['mc'] + " " + B['tz'] + ",".join(options) + "。请根据处理要求做出处理,并返回结果, 删除选项必须对应到明确的特殊处理要求,不要擅自删除选项。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
        ],
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)

    def _options_from_letters(text):
        # Map each letter A..J present in *text* back to its option in A.
        return [A[i] for i, ch in enumerate("ABCDEFGHIJ")
                if i < len(A) and ch in text]

    # Characters other than separators and the letters A..M count as "noise";
    # a near-noise-free reply is already in the compact [A,B,...] form.
    noise = [x for x in json_string if x not in ',[] ' and (x < 'A' or x > 'M')]
    if len(noise) < 5:
        return _options_from_letters(json_string)

    # Verbose reply: ask a lightweight model to condense it to bare letters.
    completion = sfclient.chat.completions.create(
        model=simplemodel(),
        messages=xuanxiang(json_string),
        extra_body={"thinking": {"type": "disabled"}},
    )
    json_string = completion.choices[0].message.content
    print(json_string)
    return _options_from_letters(json_string)
+
def postprocess0115(selected, data, aiclient, qwclient, sfclient, label_name, name_dw, candidates):
    """Thin wrapper: apply the curtain-wall LLM filter and return its result
    unchanged (no fallback in this pipeline stage)."""
    return aifilter1(selected, data, aiclient, qwclient, sfclient, name_dw)

+ 251 - 18
postprocess0117.py

@@ -1,15 +1,214 @@
 import json
+from config import simplemodel
+from template import xuanxiang
 with open("cuoshi_corpus.txt", "r") as f:
     content = f.read()
 pairs = content.split("\n")
+with open("name_label", "r") as f:
+    content = f.read()
+label = json.loads(content)
 pair_tuple = []
 i = 0
 while i < len(pairs):
     pair_tuple.append([pairs[i], pairs[i+1]])
     i = i + 2
+chaogao = [label['19-' + str(i+1)][30:] for i in range(18)]
+jiangshui = [
+  [label['22-11'], label['22-12'], label['22-13']],
+  [label['22-14'], label['22-15'], label['22-16']],
+  # NOTE(review): this group jumps from 22-17 to 22-19 — confirm 22-18 is intentionally excluded
+  [label['22-17'], label['22-19'], label['22-20']]
+]
+def handle_jichu(
+       A,
+       B, #data
+       aiclient, sfclient):
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "问题描述: 设备基础一般可分成块体设备基础(记作A)和框架设备基础(记作B)。给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +   "," + "请判断描述的设备基础属于A还是B,并返回结果。如果无法确定,则返回A。"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer = 'A'
+    if len(json_string) < 5:
+        if 'B' in json_string :
+            answer = 'B'
+    else:
+        completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将结果以json格式输出"},
+            {"role": "user", "content": "问题描述: 给定一段文字: " + json_string + "。文字给出了一个类似于A或者B的英文字母作为答案,请返回这个英文字母结果,不要输出分析过程"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        )
+        json_string = completion.choices[0].message.content
+        print(json_string)
+        answer='A'
+        if 'B' in json_string :
+            answer = 'B'
+    if answer == 'A':
+        return [label['21-14']]
+    else:
+        return [label['21-18']]
+def handle_water(
+       A,
+       B, #data
+       aiclient, sfclient):
+    hit = False
+    hit_group = []
+    for entry in A:
+        if hit:
+            break
+        for group in jiangshui:
+            if entry in group:
+                hit = True
+                hit_group = group
+                break
+    return hit_group
+def handle_chaogao(
+       B, #data
+       aiclient, sfclient):
+    options=[]
+    letters = "ABCDEFGHIJKLMNOPQRST"
+    for i in range(18):
+        options.append("给定选项" + letters[i]+",内容为"+chaogao[i])
+
+    completion = aiclient.chat.completions.create(
+        model="glm-4.5-air",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +   "," + ",".join(options) + "。请选出最匹配的选项,并返回结果。如果无法确定,则选择A。请输出英文字母选项作为答案,不要输出选项对应的文字描述"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    answer = 'A'
+    if len(json_string) < 8:
+        if 'B' in json_string :
+            answer = 'B'
+        if 'C' in json_string :
+            answer = 'C'
+        if 'D' in json_string :
+            answer = 'D'
+        if 'E' in json_string :
+            answer = 'E'
+        if 'F' in json_string :
+            answer = 'F'
+        if 'G' in json_string :
+            answer = 'G'
+        if 'H' in json_string :
+            answer = 'H'
+        if 'I' in json_string :
+            answer = 'I'
+        if 'J' in json_string :
+            answer = 'J'
+        if 'K' in json_string :
+            answer = 'K'
+        if 'L' in json_string :
+            answer = 'L'
+        if 'M' in json_string :
+            answer = 'M'
+        if 'N' in json_string :
+            answer = 'N'
+        if 'O' in json_string :
+            answer = 'O'
+        if 'P' in json_string :
+            answer = 'P'
+        if 'Q' in json_string :
+            answer = 'Q'
+        if 'R' in json_string :
+            answer = 'R'
+    else:
+        completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将结果以json格式输出"},
+            {"role": "user", "content": "问题描述: 给定一段文字: " + json_string + "。文字给出了一个类似于A或者B的英文字母作为答案,请返回这个英文字母结果,不要输出分析过程"},
+        ],
+        extra_body={"thinking": {"type": "disabled"}},
+        )
+        json_string = completion.choices[0].message.content
+        print(json_string)
+        answer='A'
+        if 'B' in json_string :
+            answer = 'B'
+        if 'C' in json_string :
+            answer = 'C'
+        if 'D' in json_string :
+            answer = 'D'
+        if 'E' in json_string :
+            answer = 'E'
+        if 'F' in json_string :
+            answer = 'F'
+        if 'G' in json_string :
+            answer = 'G'
+        if 'H' in json_string :
+            answer = 'H'
+        if 'I' in json_string :
+            answer = 'I'
+        if 'J' in json_string :
+            answer = 'J'
+        if 'K' in json_string :
+            answer = 'K'
+        if 'L' in json_string :
+            answer = 'L'
+        if 'M' in json_string :
+            answer = 'M'
+        if 'N' in json_string :
+            answer = 'N'
+        if 'O' in json_string :
+            answer = 'O'
+        if 'P' in json_string :
+            answer = 'P'
+        if 'Q' in json_string :
+            answer = 'Q'
+        if 'R' in json_string :
+            answer = 'R'
+    if answer == 'A':
+        return [label['19-1']]
+    if answer == 'B':
+        return [label['19-2']]
+    if answer == 'C':
+        return [label['19-3']]
+    if answer == 'D':
+        return [label['19-4']]
+    if answer == 'E':
+        return [label['19-5']]
+    if answer == 'F':
+        return [label['19-6']]
+    if answer == 'G':
+        return [label['19-7']]
+    if answer == 'H':
+        return [label['19-8']]
+    if answer == 'I':
+        return [label['19-9']]
+    if answer == 'J':
+        return [label['19-10']]
+    if answer == 'K':
+        return [label['19-11']]
+    if answer == 'L':
+        return [label['19-12']]
+    if answer == 'M':
+        return [label['19-13']]
+    if answer == 'N':
+        return [label['19-14']]
+    if answer == 'O':
+        return [label['19-15']]
+    if answer == 'P':
+        return [label['19-16']]
+    if answer == 'Q':
+        return [label['19-17']]
+    if answer == 'R':
+        return [label['19-18']] 
+    return [label['19-1']]
 def aifilter(A, #options
        B, #data
-       aiclient):
+       aiclient, sfclient):
     options=[]
     letters = "ABCDEFGHIJKLMN"
     for i in range(len(A)):
@@ -19,42 +218,76 @@ def aifilter(A, #options
         model="glm-4.5-flash",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 背景知识:如果工作内容是土石方工程、土方工程等,那么不能选用塔式起重机"},
+            {"role": "user", "content": " 背景知识:如果工作内容是土石方工程、土方工程等,那么不能选用塔式起重机;如果工作内容是土建工程,那么可以使用塔式起重机、施工电梯"},
             {"role": "user", "content": "问题描述: 给定一段工作内容: " + B['label'] + " " +  B['mc'] + " " + B['tz'] +   "," + ",".join(options) + "。请做出筛选,并返回结果。例如,如果处理完后剩余A,B,C三个选项,请返回[A,B,C]"},
         ],
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = aiclient.chat.completions.create(
-        model="glm-4.5-flash",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B,C]的数组作为答案,请将该最终答案输出"},                       
-        ],
+    answer=[]
+    if len(json_string) < 8:
+        if 'A' in json_string and len(A) > 0:
+            answer.append(A[0])
+        if 'B' in json_string and len(A) > 1:
+            answer.append(A[1])
+        if 'C' in json_string and len(A) > 2:
+            answer.append(A[2])
+        if 'D' in json_string and len(A) > 3:
+            answer.append(A[3])
+        if 'E' in json_string and len(A) > 4:
+            answer.append(A[4])
+        if 'F' in json_string and len(A) > 5:
+            answer.append(A[5])
+        if 'G' in json_string and len(A) > 6:
+            answer.append(A[6])
+        return answer
+    completion = sfclient.chat.completions.create(
+        model=simplemodel(),
+        messages=xuanxiang(json_string),
         extra_body={"thinking": {"type": "disabled"}},
     )
     json_string = completion.choices[0].message.content
     print(json_string)
     answer=[]
-    if 'A' in json_string:
+    if 'A' in json_string and len(A) > 0:
         answer.append(A[0])
-    if 'B' in json_string:
+    if 'B' in json_string and len(A) > 1:
         answer.append(A[1])
-    if 'C' in json_string:
+    if 'C' in json_string and len(A) > 2:
         answer.append(A[2])
-    if 'D' in json_string:
+    if 'D' in json_string and len(A) > 3:
         answer.append(A[3])
-    if 'E' in json_string:
+    if 'E' in json_string and len(A) > 4:
         answer.append(A[4])
-    if 'F' in json_string:
+    if 'F' in json_string and len(A) > 5:
         answer.append(A[5])
-    if 'G' in json_string:
+    if 'G' in json_string and len(A) > 6:
         answer.append(A[6])
-    if 'H' in json_string:
+    if 'H' in json_string and len(A) > 7:
         answer.append(A[7])
     return answer
-def postprocess0117(selected, data, aiclient):
+def postprocess0117(selected, data, aiclient, sfclient):
+    water = 0
+    non_water = 0
+    for entry in selected:
+        if '施工排水' in entry:
+            water = water + 1
+        else:
+            non_water = non_water + 1
+    if water > non_water:
+        return handle_water(selected, data, aiclient, sfclient)
+    jichu = 0
+    non_jichu = 0
+    for entry in selected:
+        if '设备基础' in entry:
+            jichu = jichu + 1
+        else:
+            non_jichu = non_jichu + 1
+    if jichu > non_jichu:
+        return handle_jichu(selected, data, aiclient, sfclient)
+
+    if '超高增加' in data['mc'] or '超高增加' in data['tz'] or '超高施工增加' in data['mc'] or '超高施工增加' in data['tz']:
+        return handle_chaogao(data, aiclient, sfclient)
     correct=[]
     for entry in selected:
         correct.append(entry)
@@ -62,4 +295,4 @@ def postprocess0117(selected, data, aiclient):
             if entry in item:
                 correct = correct + item
     correct = list(set(correct))
-    return aifilter(correct, data, aiclient)
+    return aifilter(correct, data, aiclient, sfclient)

+ 60 - 3
server.js

@@ -5,7 +5,7 @@ const app = express();
 app.use(express.json());
 import Service from './Service.js';
 import {copy} from './utils.js';
-
+import {handleBeizhu} from './editor.js';
 app.post('/api/transform', (req, res) => {
    let data = copy(req.body);
    Service.setQufei([{ "children": [{"children":[{
@@ -48,10 +48,67 @@ app.post('/api/transform', (req, res) => {
    for (let i = 0; i < result.length; i++) {
        console.log(result[i][0])
        console.log(result[i][1])
+       //console.log(JSON.parse(result[i][2]))
+       let selected = data['fuzhu'][i][0]
+	let selected2=[]
+	  for(let j = 0; j < selected.length; j++){
+	  selected2.push(selected[j].charCodeAt(0) - 64)
+	  }
+       let fk = data['fuzhu'][i][1]
+       let zk = data['fuzhu'][i][2]
+	let zk2 = []
+	 for(let j = 0;j<zk.length;j++){
+	 zk2.push({
+	  'id': j+1,
+	  'key': j+1,
+	  '序号':j+1,
+	  '数量':1,
+	   '编号':zk[j][1],
+	   '说明':zk[j][2]
+	 })
+	 }
        let r = Service.changguidinge(JSON.parse(result[i][2]), "1")
-       r = Service.updateShuliang(result[i][1], r[2])
+       if (selected2.length>0){
+         let detail = handleBeizhu(fk,r[2],selected2,zk2)
+         Service.updateBeizhu(r[2],detail,selected2.map(x=>x.toString().concat('*1')))
+       }
+       let current = Service.current()[0]
+       current = current['_children'].filter(x=>x['key']==r[2])[0]
+       current = current['dercj']
+       let shajiang = false
+       for(let j=1;j<current.length;j++){
+            if(current[j][2].includes('灰浆搅拌机'))shajiang=true;
+       }
+	   
+       if (shajiang){
+           Service.handleYuban(r[2], ['2']);
+       }
+       let r_ = Service.updateShuliang(result[i][1], r[2])
+        let hit = r_[1][0]['_children'].filter(x=>x['key']==r[2])[0]
+       let dercj = copy(hit['dercj'])
+       for(let j = 1; j < dercj.length; j++){
+            let bianma = dercj[j][1]
+	    if (data['replace'].hasOwnProperty(bianma)){
+	      dercj[j][2] = data['replace'][bianma] 
+            }
+	    for(let k = 0; k < result[i][3].length;k++){
+	        let old_bianma = result[i][3][k]['bianma']
+	        let new_bianma = result[i][3][k]['new_bianma']
+	        let new_mc = result[i][3][k]['mc']
+	        let new_jg = result[i][3][k]['jg']
+		if (old_bianma == bianma){
+		  dercj[j][1] = new_bianma
+		  dercj[j][2] = new_mc
+		  dercj[j][5] = new_jg
+			
+		}
+	    }
+
+       }
+       Service.updateDercj(r[2], dercj)
    }
-   if (data['extra'].length > 4) {
+   console.log(data['extra']);
+   if (data['extra'].length >= 4) {
         Service.danxiangdinge("1", data['n'], data['extra'])
    }
    //console.log(Service.cache)

+ 2 - 0
service.py

@@ -1583,6 +1583,7 @@ def getClxl(name, zhuanye):
 
 
 def getSingleDeXilie(zhuanye, debh):
+    print(debh)
     if zhuanye == 10:
         A1, A2, A3, A4, A5, A6, A7 = getSingleDeXilie_tj(debh)
         if A1:
@@ -2743,6 +2744,7 @@ def getSingleDeXilie_tj(debh):
         position = debh.find("[")
         debh = debh[:position-1]
     gj = pd.read_csv("de/tj/JD_DanWeiGJ.csv")
+    print(debh)
     filtered = gj[gj["DEBH"]==debh]
     ##print(filtered)
     if len(filtered) > 0:

+ 329 - 64
tasks.py

@@ -1,6 +1,13 @@
 import time
+from tihuan import tihuan
+from fuzhu_util import fuzhu_util
+from f_youqi import f_youqi
+import re
+from config import simplemodel
+from template import expression
 from fallback import fallback
 from dianceng import dianceng
+from mianceng import mianceng
 from lingji import lingji
 from jieheceng import jieheceng
 from celery_app import celery_app
@@ -10,11 +17,14 @@ from huansuan import callzaihuansuan
 import json
 import dedata
 import chromadb
+print("chroma")
 client = chromadb.HttpClient(host='47.101.198.30',port=8000)
-collection = client.get_or_create_collection(name="tj_de_bge")
+collection = client.get_or_create_collection(name="tj_de_bge", metadata={'hnsw:search_ef':15})
 cuoshi_collection = client.get_or_create_collection(name="tj_cuoshi_bge")
 menchuang_collection = client.get_or_create_collection(name="tj_menchuang_bge")
+qita_collection = client.get_or_create_collection(name="tj_qita_bge")
 from FlagEmbedding import FlagModel
+print("model")
 model = FlagModel('/mnt/d/Develop/bge/test2_encoder_only_base_bge-large-zh-v1.5')
 cuoshi_model = FlagModel('cuoshi_encoder_only_base_bge-large-zh-v1.5/cuoshi_encoder_only_base_bge-large-zh-v1.5')
 from sentence_transformers import CrossEncoder
@@ -45,8 +55,11 @@ name_label = json.loads(content)
 with open("name_dw", "r") as f:
     content = f.read()
 name_dw = json.loads(content)
+with open("fuzhu_candidate", "r") as f:
+    content = f.read()
+fuzhu_candidate = json.loads(content)
 THRESHOLD=0.8####adjust it
-
+print("client")
 import os
 from openai import OpenAI
 import requests
@@ -55,6 +68,7 @@ aiclient = OpenAI(
     api_key=os.getenv("ZAI_API_KEY"),
     #base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
     base_url="https://open.bigmodel.cn/api/paas/v4/",
+    timeout=60
 )
 qwclient = OpenAI(
     # 若没有配置环境变量,请用百炼API Key将下行替换为:api_key="sk-xxx",
@@ -62,6 +76,7 @@ qwclient = OpenAI(
     api_key=os.getenv("MS_API_KEY"),
     #base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
     base_url="https://api-inference.modelscope.cn/v1/",
+    timeout=60
 )
 hyclient = OpenAI(
     # 若没有配置环境变量,请用百炼API Key将下行替换为:api_key="sk-xxx",
@@ -69,25 +84,56 @@ hyclient = OpenAI(
     api_key=os.getenv("HY_API_KEY"),
     #base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
     base_url="https://api.hunyuan.cloud.tencent.com/v1",
+    timeout=60
 )
-bdclient = OpenAI(
+dsclient = OpenAI(
     # 若没有配置环境变量,请用百炼API Key将下行替换为:api_key="sk-xxx",
     #api_key=os.getenv("DASHSCOPE_API_KEY"),
-    api_key=os.getenv("BD_API_KEY"),
+    api_key=os.getenv("DS_API_KEY"),
     #base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
-    base_url="https://qianfan.baidubce.com/v2",
+    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
+    timeout=60
 )
+##silicon flow不能用,超级慢,垃圾
 sfclient = OpenAI(
     # 若没有配置环境变量,请用百炼API Key将下行替换为:api_key="sk-xxx",
     #api_key=os.getenv("DASHSCOPE_API_KEY"),
     api_key=os.getenv("SF_API_KEY"),
     #base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
     base_url="https://api.siliconflow.cn/v1",
+    timeout=60
 )
+ssclient = OpenAI(
+    api_key=os.getenv("SS_API_KEY"),  # 此处传token,不带Bearer
+    base_url="https://chat.intern-ai.org.cn/api/v1/",
+    timeout=60
+)
+simpleclient = ssclient
 
 
-
-def callzaikuailiao(data):
+def has_chinese(text):
+    pattern = re.compile(r'[\u4e00-\u9fff]+')
+    result = pattern.search(text)
+    return True if result else False
+def analyze(json_string):
+    obj = json.loads(json_string)
+    bz = None
+    if 'bz_selected' in obj:
+        bz = obj['bz_selected']
+    # NOTE(review): if 'bz_selected' is absent, bz stays None and bz['BZBH'] below raises TypeError — add a guard or fail fast with a clear error
+    bh = bz['BZBH']
+    bh_list = []
+    for entry in bh:
+        bh_list.append([entry, bh[entry], bz['SM'][entry]])
+    print(bh_list)
+    return bh_list,obj['bz_selected2']
+def select_fuzhu(input, label, aiclient, qwclient, sfclient):
+    if label_name[input[0]] in fuzhu_candidate:
+        print(input[0])
+        print(input[1][0])
+        print(input[1][1])
+        return fuzhu_util(input[0], [x[2] for x in input[1][0]], label, aiclient, qwclient, sfclient),input[1][1],input[1][0]
+    return [],{},[]
+def callzailonggu_0113(data):
     time.sleep(1)
     completion = aiclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
@@ -96,7 +142,7 @@ def callzaikuailiao(data):
         #model="qwen3-4b",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": " 楼地面有多种做法,包括整体面层,块料面层,木地板等。块料面层常见的有石材块料面板,缸砖,马赛克,假麻石块,地砖,橡胶塑料板等。 现在给定一工作内容如下: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ", 计量单位为" + data['dw'] + ", 请问该工作内容中包括了块料面层施工吗?"},
+        {"role": "user", "content": " 天棚工程有多种做法,包括单纯抹灰,单纯天棚面层,也可以制作龙骨并在龙骨上安装面层等。龙骨包括方木龙骨,轻钢龙骨,铝合金龙骨等。 现在给定一工作内容如下: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ", 计量单位为" + data['dw'] + ", 请问该工作内容中包括了龙骨吗?"},
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -106,13 +152,16 @@ def callzaikuailiao(data):
     json_string = completion.choices[0].message.content
     print(json_string)
     time.sleep(1)
-    completion = sfclient.chat.completions.create(
+    completion = simpleclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
-        model="THUDM/GLM-4-9B-0414",
-        #model="glm-4.5-flash",
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了关于是否包括块料面层施工的判断,请将该判断输出。请输出是或者否"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了关于是否包括龙骨的判断,请将该判断输出。 例如:"+'''
+                 {
+		'判断':'不包括'
+		}
+             '''},                       
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -122,14 +171,16 @@ def callzaikuailiao(data):
     json_string = completion.choices[0].message.content
     print(json_string)
     return json_string
-def callzaiclarify(data):
+def callzaikuailiao_0112(data):
+    time.sleep(1)
     completion = aiclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
+        #model="ZhipuAI/GLM-4.5",
         #model="qwen3-4b",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": "  管桩的清单工作内容描述可以分成两类,一类是打桩、压桩,属于桩基工程的一种,其工作内容计量的单位一般是米(m)或根。另一类是填芯,一般是向桩芯内浇混凝土,属于土建工程的一种,其配套的计量单位一般是立方米(m3),即浇混凝土的体积量。现在给定一工作内容如下: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ", 计量单位为" + data['dw'] + ", 请问该工作内容属于填芯吗?"},
+        {"role": "user", "content": " 墙柱面有多种做法,包括单纯抹灰,镶贴块料面层,使用墙板等。块料面层常见的有石材块料面板,瓷砖,面砖,假麻石块等。 现在给定一工作内容如下: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ", 计量单位为" + data['dw'] + ", 请问该工作内容中包括了块料面层施工吗?"},
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -138,13 +189,17 @@ def callzaiclarify(data):
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    completion = sfclient.chat.completions.create(
+    time.sleep(1)
+    completion = simpleclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
-        model="THUDM/GLM-4-9B-0414",
-        #model="glm-4.5-flash",
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了关于是不是填芯的判断,请将该判断输出。请输出是或者否"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了关于是否包括块料面层施工的判断,请将该判断输出。 例如:"+'''
+                 {
+		'判断':'不包括'
+		}
+             '''},                       
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -154,16 +209,16 @@ def callzaiclarify(data):
     json_string = completion.choices[0].message.content
     print(json_string)
     return json_string
-def callzaidw(A,B):
+def callzaikuailiao(data):
     time.sleep(1)
-    completion = qwclient.chat.completions.create(
+    completion = aiclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
-        #model="glm-4.5-flash",
-        model="ZhipuAI/GLM-4.5",
+        model="glm-4.5-flash",
+        #model="ZhipuAI/GLM-4.5",
         #model="qwen3-4b",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-        {"role": "user", "content": "  计量单位可以用名称或者符号表示,常用的符号包括表示米的符号m,表示千米的符号km,表示吨的符号t,表示千克的符号kg,表示平方米的符号m2,表示立方米的符号m3。也有计量单位很宽泛,比如“项”、“次”. 给定一个工作量计量单位,内容为" + A + ",记作A,再给定一个工作量计量单位,内容为" + B + ",记作B。若两个单位相等,请返回A=B。例如,“项”跟“次”是等价的,应返回A=B。若两个单位不相等,但是存在比例换算关系,请返回比例换算关系,例如A单位是m,B单位是10m, 则返回A=0.1*B。再例如,A单位是10m2,B单位是m2,则返回A=10*B。再例如,A单位是m3, B单位是1000m3,则返回A=0.001*B。若两个单位不相等,且不存在比例换算关系,请返回A<>B,例如A单位是m,B单位是m2,一个表示长度,一个表示面积,不存在比例关系,则返回A<>B。 "},
+        {"role": "user", "content": " 楼地面有多种做法,包括整体面层,块料面层,木地板等。块料面层常见的有石材块料面板,缸砖,马赛克,假麻石块,地砖,橡胶塑料板等。 现在给定一工作内容如下: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ", 计量单位为" + data['dw'] + ", 请问该工作内容中包括了块料面层施工吗?"},
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -173,13 +228,16 @@ def callzaidw(A,B):
     json_string = completion.choices[0].message.content
     print(json_string)
     time.sleep(1)
-    completion = qwclient.chat.completions.create(
+    completion = simpleclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
-        model="ZhipuAI/GLM-4.5",
-        #model="glm-4.5-flash",
+        model=simplemodel(),
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,请将该最终答案输出"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了关于是否包括块料面层施工的判断,请将该判断输出。 例如:"+'''
+                 {
+		'判断':'不包括'
+		}
+             '''},                       
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -189,16 +247,14 @@ def callzaidw(A,B):
     json_string = completion.choices[0].message.content
     print(json_string)
     return json_string
-
-
-def callzai(A,B,C):
+def youqi_(tz):
     completion = aiclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
-        model="glm-4.5-flash",
+        model="glm-4.5-air",
         #model="qwen3-4b",
         messages=[
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": " 给定一条主定额,内容为" + A + ",记作A,再给定一条配套定额,内容为" + B + ",记作B。可以通过简单的组合,来表达对应的工作内容的数量,比如A+B可以表达,主定额的工作量加上配套定额的工作量;再比如,A+B*2可以表达, 主定额的工作量加上两倍的配套定额的工作量;再比如,A+B*(-2)可以表达, 主定额减去两倍的配套定额的工作量;再比如,A可以表示,不使用配套定额,仅表示主定额的工作量。现在给你一条工程量清单,内容为" + C + ",该条清单包含了主定额描述的工作内容,但是数量并不一定一致。请你组合A与B,表示出清单描述的对应工作数量。请输出类似A+B、A+B*2、A-B*2的格式,不要输出A+2*B、A-2*B的格式。如果清单里相应工作量的描述不明确,请输出A作为答案 "},
+            {"role": "system", "content": "You are a helpful assistant.请将结果以json格式输出"},
+        {"role": "user", "content": " 金属结构的油漆一般是多层涂刷,常规的是包括底漆跟面漆。也可能是底漆,中间漆再加面漆。金属结构有时需要涂刷防火涂料。但是防火涂料跟油漆不是一回事。现在给定一工作内容如下: " + tz + ", 请将该工作内容中的油漆相关内容原封不动地抽取出来。请勿抽取防火涂料相关内容。如果工作内容中完全没有提到油漆,则返回”无“"},
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -207,15 +263,117 @@ def callzai(A,B,C):
     )
     json_string = completion.choices[0].message.content
     print(json_string)
-    if len(json_string) < 4:
-        return json_string
+    return json_string
+def callzaiclarify(data):
     completion = aiclient.chat.completions.create(
         # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
+        model="glm-4.5-air",
         #model="qwen3-4b",
-        model="glm-4.5-flash",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "  管桩的清单工作内容描述可以分成两类,一类是打桩、压桩,属于桩基工程的一种,其工作内容计量的单位一般是米(m)或根。另一类是填芯,一般是向桩芯内浇混凝土,属于土建工程的一种,其配套的计量单位一般是立方米(m3),即浇混凝土的体积量。现在给定一工作内容如下: " + data['label'] + " " + data['mc'] + " " + data['tz'] + ", 计量单位为" + data['dw'] + ", 请问该工作内容属于填芯吗?"},
+        ],
+        # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
+        # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
+        #extra_body={"enable_thinking": False},
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    completion = simpleclient.chat.completions.create(
+        # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
+        model=simplemodel(),
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了关于是不是填芯的判断,请将该判断输出。请输出是或者否"},                       
+        ],
+        # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
+        # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
+        #extra_body={"enable_thinking": False},
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return json_string
+def callzaidw(A,B):
+    time.sleep(1)
+    completion = dsclient.chat.completions.create(
+        # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
+        model='glm-4.5-air',
+        #model="modelscope.cn/unsloth/GLM-Z1-32B-0414-GGUF",
+        stream=True,
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "  计量单位可以用名称或者符号表示,常用的符号包括表示米的符号m,表示千米的符号km,表示吨的符号t,表示千克的符号kg,表示平方米的符号m2,表示立方米的符号m3。也有计量单位很宽泛,比如“项”、“次”. 给定一个工作量计量单位,内容为" + A + ",记作A,再给定一个工作量计量单位,内容为" + B + ",记作B。若两个单位相等,请返回A=B。例如,“项”跟“次”是等价的,应返回A=B。若两个单位不相等,但是存在比例换算关系,请返回比例换算关系,例如A单位是m,B单位是10m, 则返回A=0.1*B。再例如,A单位是10m2,B单位是m2,则返回A=10*B。再例如,A单位是m3, B单位是1000m3,则返回A=0.001*B。若两个单位不相等,且不存在比例换算关系,请返回A<>B,例如A单位是m,B单位是m2,一个表示长度,一个表示面积,不存在比例关系,则返回A<>B。 "},
+        ],
+        # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
+        # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
+        extra_body={"enable_thinking": False},
+        #extra_body={"thinking": {"type": "disabled"}},
+    )
+    is_answering = False  # 是否进入回复阶段
+    print("\n" + "=" * 20 + "思考过程" + "=" * 20)
+    json_string = ""
+    for chunk in completion:
+        delta = chunk.choices[0].delta
+        if hasattr(delta, "reasoning_content") and delta.reasoning_content is not None:
+            if not is_answering:
+                print(delta.reasoning_content, end="", flush=True)
+        if hasattr(delta, "content") and delta.content:
+            if not is_answering:
+                print("\n" + "=" * 20 + "完整回复" + "=" * 20)
+                is_answering = True
+            json_string = json_string + delta.content#, end="", flush=True)
+    #json_string = completion.choices[0].message.content
+    print(json_string)
+    time.sleep(1)
+    completion = simpleclient.chat.completions.create(
+        # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
+        #model="THUDM/GLM-4-9B-0414",
+        model=simplemodel(),
+        #model="glm-4.5-flash",
+        messages=expression(json_string),
+        # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
+        # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
+        #extra_body={"enable_thinking": False},
+        extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    return json_string
+
+
+def callzai(A,B,C):
+    completion = qwclient.chat.completions.create(
+        # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
+        #model="glm-4.5-flash",
+        model="Qwen/Qwen3-32B",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": " 给定一条主定额,内容为" + A + ",记作A,再给定一条配套定额,内容为" + B + ",记作B。可以通过简单的组合,来表达对应的工作内容的数量,比如A+B可以表达,主定额的工作量加上配套定额的工作量;再比如,A+B*2可以表达, 主定额的工作量加上两倍的配套定额的工作量;再比如,A+B*(-2)可以表达, 主定额减去两倍的配套定额的工作量;再比如,A可以表示,不使用配套定额,仅表示主定额的工作量。现在给你一条工程量清单,内容为" + C + ",该条清单包含了主定额描述的工作内容,但是数量并不一定一致。请你找到清单中对应的描述(不需要考虑混凝土强度等级是否完全一致,需要区分面层和找平找坡层),辨识出工作数量(砂浆保护层厚度跟砂浆找平层厚度不可合并),并组合A与B,表示出清单描述的对应工作数量。请给出分析过程,并请最终输出类似A+B、A+B*2、A-B*2的格式,不要输出A+2*B、A-2*B的格式。如果清单里相应工作量的描述不明确,请输出A作为答案 "},
+        ],
+        # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
+        # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
+        extra_body={"enable_thinking": False},
+        #extra_body={"thinking": {"type": "disabled"}},
+    )
+    json_string = completion.choices[0].message.content
+    print(json_string)
+    if len(json_string) < 4:
+        return "result:" + json_string
+    completion = simpleclient.chat.completions.create(
+        # 模型列表:https://help.aliyun.com/zh/model-studio/getting-started/models
+        model=simplemodel(),
+        #model="THUDM/GLM-4-9B-0414",
         messages=[
             {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
-            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A+B的表达式作为答案,请将该最终答案输出"},                       
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A+B的表达式作为答案,请将该最终答案输出.如果文字中指出可能有两种答案,则只需要输出其中一种。例如" +'''
+            {
+              'result' : 'A+B*-2'
+            }
+            注意,只需要输出最终答案,不需要中间过程或者其他细节,只需要输出一种答案
+             '''
+             },                       
         ],
         # Qwen3模型通过enable_thinking参数控制思考过程(开源版默认True,商业版默认False)
         # 使用Qwen3开源版模型时,若未启用流式输出,请将下行取消注释,否则会报错
@@ -230,15 +388,20 @@ def transform(answer, input, entry):
     answers = [x for x in answers if ':' in x and "A" in x]
     if len(answers) == 0:
         return input
+    answers[0] = answers[0].split(',')[0]
     answer2 = answers[0].split(":")[1].replace(" ", "")
-    answer2 = answer2.replace("A", input)
-    answer2 = answer2.replace("B", "["+entry+"]")
+    if 'B' in answer2:
+        answer2 = answer2.replace("B", 'BBB')
+        answer2 = answer2.replace("A", input)
+        answer2 = answer2.replace("BBB", "["+entry+"]")
+    else:
+        answer2 = answer2.replace("A", input)
     answer2 = answer2.replace("\'", "")
     answer2 = answer2.replace("\"", "")
     return answer2
 
 def zuhe(input, content):
-    if input in incremental:
+    if input in incremental and input != '17-176' and input != '17-177' and input != '17-179' and input != '17-180' and input != '17-181':
         option = incremental[input]
         for entry in option:
             answer = callzai(name_label[input], name_label[entry], content)
@@ -256,10 +419,12 @@ def huansuan_highlevel(bianma, label, input,dw, tz):
         dw1 = input
         dw1 = dw1.lower()
         dw1 = dw1.replace("水平投影面积", "")
+        dw1 = dw1.replace("每1m2建筑面积", "m2")
         dw2 = name_dw[dw]
         dw2 = dw2.lower()
         dw2 = dw2.replace("水平投影面积", "")
-        answer = callzaihuansuan(bianma, label, dw1,dw2,tz, aiclient, qwclient)
+        dw2 = dw2.replace("每1m2建筑面积", "m2")
+        answer = callzaihuansuan(bianma, label, dw1,dw2,tz, aiclient, qwclient, simpleclient)
         answers = answer.split("\n")
         answers = [x for x in answers if ':' in x and "A" in x]
         answers = [x for x in answers if '=' in x or '<>' in x]
@@ -294,16 +459,35 @@ def huansuan(input, dw):
     dw1 = input
     dw1 = dw1.lower()
     dw1 = dw1.replace("水平投影面积", "")
+    dw1 = dw1.replace("每1m2建筑面积", "m2")
     dw2 = name_dw[dw]
     dw2 = dw2.lower()
     dw2 = dw2.replace("水平投影面积", "")
+    dw2 = dw2.replace("每1m2建筑面积", "m2")
     if dw1 == dw2:
         return 1
+    if dw1 == 'm2' and dw2 == '10m2':
+        return 0.1
+    if dw1 == 'm2' and dw2 == '100m2':
+        return 0.01
+    if dw1 == 'm2' and dw2 == '1000m2':
+        return 0.001
+    if dw1 == 'm' and dw2 == '10m':
+        return 0.1
+    if dw1 == 'm' and dw2 == '100m':
+        return 0.01
+    if dw1 == 'm3' and dw2 == '10m3':
+        return 0.1
+    if dw1 == 'm3' and dw2 == '100m3':
+        return 0.01
+    if dw1 == 'm3' and dw2 == '1000m3':
+        return 0.001
     else:
         answer = callzaidw(dw1,dw2)
         answers = answer.split("\n")
         answers = [x for x in answers if ':' in x and "A" in x]
         answers = [x for x in answers if '=' in x or '<>' in x]
+        answers = [x for x in answers if not has_chinese(x)]
         print(answers)
         if len(answers) == 0:
             return 0
@@ -325,34 +509,78 @@ def huansuan(input, dw):
 
 def clarify(data):
     data['tz'] = data['tz'].replace("水泥基防水涂料", "水泥基渗透结晶防水涂料")
-    data['tz'] = data['tz'].replace("无机保温砂浆", "无机轻集料保温砂浆")
+    data['tz'] = data['tz'].replace("回光灯带", "回光灯槽")
+    data['mc'] = data['mc'].replace("回光灯带", "回光灯槽")
+    data['tz'] = data['tz'].replace("自流平", "自流平地面")
     data['tz'] = data['tz'].replace("JS防水涂料", "聚合物水泥防水涂料")
+    data['tz'] = data['tz'].replace("聚合物水泥砂浆", "防水砂浆")
+    data['tz'] = data['tz'].replace("丝杆", "丝杆吊筋")
     if data['bianma'].startswith("010301"):##打桩
         print("clarify")
         result = callzaiclarify(data)
         if "是" in result:
             data['mc'] = data['mc'] + '填芯'
         return data, False
+    elif data['bianma'].startswith("010606012"): ##钢支架
+        if '镀锌方管' in data['tz'] or '镀锌圆管' in data['tz'] or '镀锌钢管' in data['tz'] or '镀锌角钢' in data['tz']:
+            data['tz'] = data['tz'].replace('支架', '骨架')
+            data['tz'] = data['tz'].replace('基架', '骨架')
+            data['mc'] = '钢骨架'
+            return data, False
     elif data['bianma'].startswith("010507001"): ##散水、坡道
         data['tz'] = lingji(data['tz'], aiclient, qwclient)
         data['tz'] = dianceng(data['tz'], aiclient, qwclient)
+        data['tz'] = mianceng(data['tz'], aiclient, qwclient)
+        data['tz'] = jieheceng(data['tz'], aiclient, qwclient)
         return data, False
     elif data['bianma'].startswith("0109"): ##防水
         data['tz'] = lingji(data['tz'], aiclient, qwclient)
         data['tz'] = dianceng(data['tz'], aiclient, qwclient)
+        data['tz'] = jieheceng(data['tz'], aiclient, qwclient)
         return data, False
     elif data['bianma'].startswith("0111"):
+        data['tz'] = data['tz'].replace("卷材面层", "面层")
+        if data['bianma'].startswith("011107"):##台阶
+            data['mc'] = '台阶面'
+
         data['tz'] = jieheceng(data['tz'], aiclient, qwclient)
         result = callzaikuailiao(data)
-        if '是' in result:
-            return data, True
+        if '不' in result:
+            return data, False
+        return data, True
+    elif data['bianma'].startswith("0112"):
+        if '镀锌方管' in data['tz'] or '镀锌圆管' in data['tz'] or '镀锌钢管' in data['tz'] or '镀锌角钢' in data['tz']:
+            data['tz'] = data['tz'].replace('支架', '骨架')
+            data['tz'] = data['tz'].replace('基架', '骨架')
+        data['tz'] = data['tz'].replace("铝单板", "铝单板幕墙")
+        data['tz'] = data['tz'].replace("阻燃板", "细木工板")
+        data['tz'] = data['tz'].replace("阻燃基层板", "细木工板")
+        data['tz'] = data['tz'].replace("水泥纤维板", "水泥压力板")
+        if '防潮层' in data['tz'] or '防水砂浆' in data['tz']:
+            data['mc']='墙柱面防水(防潮)'
+        result = callzaikuailiao_0112(data)
+        if '不' in result:
+            return data, False
+        return data, True
+    elif data['bianma'].startswith("0113"):
+        result = callzailonggu_0113(data)
+        if '不' in result:
+            return data, False
+        if not '上人' in data['tz']:
+            data['tz']=data['tz'].replace('龙骨','不上人型龙骨')
+        data['tz']=data['tz'].replace('铝格栅','铝格栅吊顶')
+        data['tz']=data['tz'].replace('纸面石膏板','纸面石膏板面层')
+        return data, True
+    elif data['bianma'].startswith("0117"):
+        data['mc']=data['mc'].replace('线条','檐沟')
+        data['tz']=data['tz'].replace('线条','檐沟')
         return data, False
 
     else:
         return data, False
 
-
-@celery_app.task
+from redis.exceptions import TimeoutError
+@celery_app.task(autoretry_for=(TimeoutError,))
 def process_data(data:dict)-> dict:
     placeholder, kuailiao = clarify(data)
     label = data['mc'] + ' ' + data['tz']
@@ -367,7 +595,11 @@ def process_data(data:dict)-> dict:
         result = cuoshi_collection.query(query_embeddings=embeddings,n_results=25)      
     else:
         result = collection.query(query_embeddings=embeddings,n_results=25)      
-
+    yqspecial=[]
+    if data['bianma'].startswith('0106'):
+        youqi = youqi_(data['tz'])
+        if '无' not in youqi:
+            yqspecial= f_youqi(data, youqi,collection,model,ce,name_label)
     d = result['documents'][0]
     print(d)
     if data['bianma'].startswith("0117"):
@@ -472,24 +704,44 @@ def process_data(data:dict)-> dict:
     #        cutoff = score - 0.1
     #        if cutoff < 0.3:
     #            cutoff = 0.3
-    if data['bianma'].startswith('0106'):##金属结构
+    if data['bianma'].startswith('0106') and '钢丝网' not in data['mc']:##金属结构
         score = -1
         for rank in ranks:
             if label_name[d[rank['corpus_id']]].startswith('7-') and label_name[d[rank['corpus_id']]] != '7-62' and label_name[d[rank['corpus_id']]] !='7-63':
                 score = rank['score']
                 break
         if score > -1 and score < cutoff:
-            cutoff = score - 0.05
+            cutoff = score - 0.01
             if cutoff < 0.3:
                 cutoff = 0.3
     if data['bianma'].startswith('0111'):##楼地面
         score = -1
         for rank in ranks:
-            if '13.4' in d[rank['corpus_id']] and '块料面层' in d[rank['corpus_id']]:
+            if ('13.4' in d[rank['corpus_id']] or '14.3' in d[rank['corpus_id']]) and '块料面层' in d[rank['corpus_id']]:
                 score = rank['score']
                 break
-        if score > -1 and score < cutoff:
-            cutoff = score - 0.05
+        if score > -1 and score < cutoff and kuailiao:
+            cutoff = score - 0.01
+            if cutoff < 0.3:
+                cutoff = 0.3
+    if data['bianma'].startswith('0112'):##墙柱面
+        score = -1
+        for rank in ranks:
+            if ('14.3' in d[rank['corpus_id']]) and '块料面层' in d[rank['corpus_id']]:
+                score = rank['score']
+                break
+        if score > -1 and score < cutoff and kuailiao:
+            cutoff = score - 0.01
+            if cutoff < 0.3:
+                cutoff = 0.3
+    if data['bianma'].startswith('0113'):##天棚
+        score = -1
+        for rank in ranks:
+            if '15.1.1' in d[rank['corpus_id']] or '15.1.2' in d[rank['corpus_id']] or '15.1.3' in d[rank['corpus_id']] or '15.1.4' in d[rank['corpus_id']] or '15.1.5' in d[rank['corpus_id']]:
+                score = rank['score']
+                break
+        if score > -1 and score < cutoff and kuailiao:
+            cutoff = score - 0.01
             if cutoff < 0.3:
                 cutoff = 0.3
     print("cutoff=" + str(cutoff))
@@ -542,25 +794,38 @@ def process_data(data:dict)-> dict:
                 notselected = notselected + basic[entry]
         notselected = [x for x in notselected if x not in selected]
     selected = list(set(selected))
-    if len(selected) == 0:
-        candidates=[]
-        for rank in ranks:
-            candidates.append(d[rank['corpus_id']])
-        selected = fallback(candidates, data, aiclient, qwclient, menchuang_collection, model)
-    selected = postprocess(selected, data, aiclient, qwclient,sfclient, label_name, name_dw)
+    candidates=[]
+    for rank in ranks:
+        candidates.append(d[rank['corpus_id']])
+    if len(selected) == 0 and not data['bianma'].startswith('0115'):
+        selected = fallback(candidates, data, aiclient, qwclient, simpleclient, menchuang_collection, model)
+    selected = postprocess(selected, data, aiclient, qwclient,simpleclient, label_name, name_dw, candidates,yqspecial)
     print("final selected")
     print(selected)
     result = [(label_name[x], huansuan_highlevel(data['bianma'], x, data['dw'], label_name[x], data['mc']+data['tz'])) for x in selected]  
     print("after haunsuan")
     print(result)
-    result = [(zuhe(x[0], label), x[1]) for x in result]  
+    result = [(zuhe(x[0], label).replace(',',''), x[1]) for x in result]  
     print("after zuhe")
     print(result)
     result = [(x[0], x[1], dedata.read_singledexilie2(10, x[0])) for x in result]
-    need = need_extra(data, aiclient, qwclient, result)
+    result = [(x[0], x[1], x[2], tihuan(x[0], x[2], label, aiclient, qwclient, simpleclient)) for x in result]
+    fuzhu = [analyze(x[2]) for x in result]
+    fuzhu = zip(selected, fuzhu)
+    fuzhu = [x for x in fuzhu]
+    fuzhu_selected = [select_fuzhu(x, label, aiclient, qwclient, simpleclient) for x in fuzhu]
+    need = need_extra(data, aiclient, qwclient, simpleclient, result)
+    replace={}
     if need:
-        extra_info = extra(data, aiclient, qwclient, menchuang_collection, model)
+        extra_info = extra(data, aiclient, qwclient, simpleclient, menchuang_collection, model, qita_collection)
+        replace['04290241']=extra_info##桩
+        replace['04290303']=extra_info
     else:
         extra_info = "无"
-    response = requests.post("http://localhost:3000/api/transform", json={'bianma': data['bianma'], 'mc': data['mc'], 'tz': data['tz'], 'dw': data['dw'], 'sl': data['sl'], 'n': data['n'], "extra": extra_info, 'result': result})
-    return {"result": response.json()}
+    if '高强螺栓' in extra_info:
+        response = requests.post("http://localhost:3000/api/transform", json={'bianma': data['bianma'], 'mc': data['mc'], 'tz': data['tz'], 'dw': data['dw'], 'sl': data['sl'], 'n': data['n'], "extra": extra_info, 'result': [], 'fuzhu': fuzhu_selected, 'replace': replace})
+        return {"result": response.json()}
+    else:
+        response = requests.post("http://localhost:3000/api/transform", json={'bianma': data['bianma'], 'mc': data['mc'], 'tz': data['tz'], 'dw': data['dw'], 'sl': data['sl'], 'n': data['n'], "extra": extra_info, 'result': result, 'fuzhu': fuzhu_selected, 'replace': replace})
+        return {"result": response.json()}
+        

+ 58 - 0
template.py

@@ -0,0 +1,58 @@
+def xuanxiang(json_string):
+    messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于[A,B]的数组作为“结果”。文字中可能还包含了分析过程,分析过程可能与最终结果不一致,你必须**忽略**分析过程,严格输出最终结果,仅输出英文字母代号,不得输出文字。 例如"+'''
+            给定文字如下:
+            <省略>...
+             推理过程:
+            <省略>...
+            7. 根据工作内容,最合适的选项是C, <省略>...
+            结果:[A,B,C,D]
+            <省略>...
+            你应该返回
+            {
+            'result': [A, B, C, D]
+            }
+            你不能返回以下内容:
+            {
+            'result': [C]
+            }
+            虽然分析过程认为C是正确答案,但是文字中写明“结果:[A,B,C,D]”.你必须**忽略**分析过程,严格输出文字中的最终结果
+            你也不能返回以下内容:
+            {
+            'result': 
+             "南通补充定额2016 第十三章 楼地面工程 砼楼地面涂刷一遍901胶素水泥浆",
+             "第十三章 楼地面工程 13.4 块料面层 13.4.1 石材块料面层 块料面层 石材块料面板 干硬性水泥砂浆 台阶"
+            }, 
+            因为你仅需要输出英文字母代号,比如A和B,不得输出文字内容
+
+             '''},
+        ]
+    return messages
+
+def expression(json_string):
+    messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一个类似于A=B的表达式作为答案,请将该最终答案以JSON格式输出(输出中不应该含有圆周率π,应化简为小数). 举个例子 :" + ''' 给定文字如下:
+            <省略>.......
+            因此,最终的换算关系是:A = 1 × B
+            即:A = B
+            <省略>.......
+            则你应该返回
+            {
+            'result': 'A=B'
+            }
+
+           再举个例子:给定文字如下:
+            <省略>.......
+            因此,最终的换算关系是:A = 0.11 × B
+            <省略>.......
+            则你应该返回
+            {
+            'result': 'A=0.11*B'
+            }
+
+
+             '''},
+        ]
+    return messages

+ 505 - 0
tihuan.py

@@ -0,0 +1,505 @@
+import json
+from tihuan_hunningtu import tihuan_hunningtu
+from tihuan_gangjin import tihuan_gangjin
+from tihuan_gangcai import tihuan_gangcai
+from tihuan_shajiang import tihuan_shajiang
+from tihuan_zhuan import tihuan_zhuan
+from tihuan_shicai import tihuan_shicai
+from tihuan_gai import tihuan_gai
+from tihuan_fangshui import tihuan_fangshui
+from tihuan_wa import tihuan_wa
+from tihuan_dizhuan import tihuan_dizhuan
+from tihuan_juancai import tihuan_juancai
+from tihuan_bancai import tihuan_bancai
+from config import simplemodel
+with open("name_label", "r") as f:
+    content = f.read()
+name_label = json.loads(content)
+def tihuan(
+       name,
+       dercj,
+       label,
+       aiclient,
+       qwclient,
+       sfclient
+       ):
+    result=[]
+    xuanxiang=[]
+    choice=[]
+    letters='ABC'
+    index = name.find('+')
+    if index > -1:
+        name = name[:index]
+    pos=[]
+    for i in range(len(name)):
+        if name[i] == '-':
+            pos.append(i)
+    if len(pos) > 1:
+        name = name[:pos[1]]
+    if name in ['13-26']:
+        return []
+    description = name_label[name]
+    rcjobj = json.loads(dercj)
+    clde = rcjobj['clde']
+    hit_zhuan=False
+    hit_kuai=False
+    for i in range(len(clde)):
+        bh = clde[i]['CLBH']
+        sl = clde[i]['SL']
+        if float(sl) < 1e-6:
+            continue
+        if bh.startswith("0413"):
+            hit_zhuan = True
+        if bh.startswith("0415"):
+            hit_kuai = True
+    for i in range(len(clde)):
+        bh = clde[i]['CLBH']
+        sl = clde[i]['SL']
+        if float(sl) < 1e-6:
+            continue
+        if bh in ['80010161']:
+            continue
+        if bh.startswith("8021"):
+            ##混凝土
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了混凝土(砼)作为材料之一。请从工作内容中找到与该工序最匹配的描述(注意区分面层跟找平找坡层),并从描述中抽取出混凝土的具体品种型号等信息。例如,如果工作内容的描述中写的是“C40微膨胀混凝土”,那么你应该原封不动的返回“C40微膨胀混凝土”。再例如,如果工作内容中描述混凝土种类商品砼,混凝土强度等级C15,那么你应该返回“C15商品砼”。如果工作内容中没有具体描述混凝土的信息,则返回“通用混凝土”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种混凝土名称作为答案,请将该混凝土名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_hunningtu(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("01010100"):
+            ##钢筋
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了钢筋作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出钢筋的具体品种型号等信息。例如,如果工作内容的描述中写的是“Φ25以内三级钢”,那么你应该原封不动的返回“Φ25以内三级钢”。如果工作内容中没有具体描述钢筋的信息,则返回“通用钢筋”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种钢筋名称作为答案,请将该钢筋名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_gangjin(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("800"):
+            ##砂浆
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了砂浆作为材料之一。请从工作内容中找到与该工序最匹配的描述(不要从工序文字中抽取描述)(注意区分找平找坡层跟面层保护层),并从抽取的描述中再抽取出砂浆的具体品种型号等信息。例如,如果工作内容的描述中写的是“Mb10水泥砂浆”,那么你应该原封不动的返回“Mb10水泥砂浆”。如果工作内容中没有具体描述砂浆的信息,则返回“通用砂浆”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种砂浆名称作为答案,请将该砂浆名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            parts = parts[1:]
+            part = ':'.join(parts)
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_shajiang(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("0413") and not hit_kuai:
+            ##砖
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了砖(砌块)作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出砖(砌块)的具体品种型号等信息。例如,如果工作内容的描述中写的是“MU20蒸压灰砂砖”,那么你应该原封不动的返回“MU20蒸压灰砂砖”。如果工作内容中没有具体描述砖(砌块)的信息,则返回“通用砖”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种砖(砌块)名称作为答案,请将该砖(砌块)名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_zhuan(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("0415"):
+            ##砌块
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了砖(砌块)作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出砖(砌块)的具体品种型号等信息。例如,如果工作内容的描述中写的是“MU20蒸压灰砂砖”,那么你应该原封不动的返回“MU20蒸压灰砂砖”。如果工作内容中没有具体描述砖(砌块)的信息,则返回“通用砖”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种砖(砌块)名称作为答案,请将该砖(砌块)名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_zhuan(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("0711213"):
+            ##石材块料
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了石材块料、砖作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出石材块料、砖的具体品种型号等信息。例如,如果工作内容的描述中写的是“30厚600X600芝麻黑火烧面花岗岩”,那么你应该原封不动的返回“30厚600X600芝麻黑火烧面花岗岩”。如果工作内容中没有具体描述石材块料、砖的信息,则返回“通用石材块料”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种石材块料、砖名称作为答案,请将该石材块料、砖名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_shicai(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("3301"):
+            ##盖板
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了盖板(篦子)作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出盖板(篦子)的具体品种型号等信息。例如,如果工作内容的描述中写的是“Φ1000带锁圆形钢盖板”,那么你应该原封不动的返回“Φ1000带锁圆形钢盖板”。如果工作内容中没有具体描述盖板(篦子)的信息,则返回“通用盖板”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种盖板(篦子)名称作为答案,请将该盖板(篦子)名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            result.append(tihuan_gai(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+            
+        if bh.startswith("012701") or bh.startswith('012703'):
+            ##钢材
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了钢材作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出钢材的具体品种型号等信息。例如,如果工作内容的描述中写的是“Q235B”,那么你应该原封不动的返回“Q235B”。如果工作内容提及镀锌钢管,则你应该返回“镀锌钢管”。如果工作内容中没有具体描述钢材的信息,则返回“通用钢”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种钢材名称作为答案,请将该钢材名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_gangcai(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("11030746"):
+            ##防水涂料
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了防水涂料作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出防水涂料的具体品种型号等信息。例如,如果工作内容的描述中写的是“2.0厚非固化沥青防水涂料”,那么你应该原封不动的返回“2.0厚非固化沥青防水涂料”。如果工作内容中没有具体描述防水涂料的信息,则返回“通用防水涂料”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种防水涂料名称作为答案,请将该防水涂料名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_fangshui(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("041703"):
+            ##水泥彩瓦
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了瓦作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出瓦的具体品种型号等信息。例如,如果工作内容的描述中写的是“0.9mm铝镁锰合金仿古金属瓦”,那么你应该原封不动的返回“0.9mm铝镁锰合金仿古金属瓦”。如果工作内容中没有具体描述瓦的信息,则返回“通用瓦”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种瓦名称作为答案,请将该瓦名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_wa(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("066501"):
+            ##地砖
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了地砖作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出地砖的具体品种型号等信息。例如,如果工作内容的描述中写的是“10厚800X800防滑地砖”,那么你应该原封不动的返回“10厚800X800防滑地砖”。如果工作内容中没有具体描述地砖的信息,则返回“通用地砖”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种地砖名称作为答案,请将该地砖名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_dizhuan(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("11570") or bh.startswith('115721'):
+            ##卷材
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了卷材作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出卷材的具体品种型号等信息。例如,如果工作内容的描述中写的是“3.0厚SBS聚合物改性沥青防水卷材(PY)Ⅱ型聚酯胎”,那么你应该原封不动的返回“3.0厚SBS聚合物改性沥青防水卷材(PY)Ⅱ型聚酯胎”。如果工作内容中没有具体描述卷材的信息,则返回“通用卷材”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种卷材名称作为答案,请将该卷材名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_juancai(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+        if bh.startswith("021103"):
+            ##挤塑板
+            completion = aiclient.chat.completions.create(
+            model="glm-4.5-air",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "以下是一段工作内容的描述: " + label + '\n其中包括了一道工序如下: ' +description +  '\n工序使用了板材作为材料之一。请从工作内容中找到与该工序最匹配的描述,并从描述中抽取出板材的具体品种型号等信息。例如,如果工作内容的描述中写的是“20厚Ⅰ型石墨聚苯板”,那么你应该原封不动的返回“20厚Ⅰ型石墨聚苯板”。如果工作内容中没有具体描述板材的信息,则返回“通用板材”'},
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+        )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            completion = sfclient.chat.completions.create(
+            model=simplemodel(),
+            messages=[
+            {"role": "system", "content": "You are a helpful assistant.请将最终答案以JSON格式输出"},
+            {"role": "user", "content": " 给你一段文字如下, " + json_string + ",其中给出了一种板材名称作为答案,请将该板材名称输出"},                       
+            ],
+            extra_body={"thinking": {"type": "disabled"}},
+            )
+            json_string = completion.choices[0].message.content
+            print(json_string)
+            parts = json_string.split('\n')
+            parts = [x for x in parts if ':' in x]
+            if len(parts) == 0:
+                continue
+            parts = parts[0].split(':')
+            part = parts[1]
+            part = part.replace(',','')
+            part = part.replace('\'','')
+            part = part.replace('\"','')
+            part = part.replace(' ','')
+            
+            result.append(tihuan_bancai(clde[i]['CLBH'], clde[i]['CLMC'], clde[i]['YSJG'], part))
+    return result

+ 4 - 0
tihuan_bancai.py

@@ -0,0 +1,4 @@
def tihuan_bancai(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted board-material (板材) name.

    When the extractor only produced the generic placeholder (the name
    contains '通用'), the original name is kept. The material code and
    unit price are never modified.
    """
    is_generic = '通用' in new_name
    return {
        'bianma': old_bianma,
        'new_bianma': old_bianma,
        'mc': old_mc if is_generic else new_name,
        'jg': old_jg,
    }

+ 4 - 0
tihuan_dizhuan.py

@@ -0,0 +1,4 @@
def tihuan_dizhuan(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted floor-tile (地砖) name.

    A '通用' (generic) placeholder keeps the original name; the material
    code and unit price are passed through unchanged.
    """
    result = {'bianma': old_bianma, 'new_bianma': old_bianma, 'jg': old_jg}
    result['mc'] = new_name if '通用' not in new_name else old_mc
    return result

+ 4 - 0
tihuan_fangshui.py

@@ -0,0 +1,4 @@
def tihuan_fangshui(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted waterproof-coating (防水涂料) name.

    An answer containing '通用' means "nothing specific was found", so
    the original name survives; code and price pass through untouched.
    """
    if '通用' in new_name:
        new_name = old_mc
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': new_name, 'jg': old_jg}

+ 4 - 0
tihuan_gai.py

@@ -0,0 +1,4 @@
def tihuan_gai(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted name for a 盖 (cover) material.

    A '通用' placeholder keeps the original name; code and price are
    never changed.
    """
    chosen = old_mc if '通用' in new_name else new_name
    return dict(bianma=old_bianma, new_bianma=old_bianma, mc=chosen, jg=old_jg)

+ 4 - 0
tihuan_gangcai.py

@@ -0,0 +1,4 @@
def tihuan_gangcai(old_bianma, old_mc, old_jg, new_name):
    """Append the extracted steel-section (钢材) spec to the original name.

    Unlike most tihuan_* helpers this one concatenates rather than
    replaces: for a specific spec, 'mc' becomes old_mc + new_name. A
    '通用' placeholder leaves the name as-is; code and price never change.
    """
    mc = old_mc if '通用' in new_name else old_mc + new_name
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': mc, 'jg': old_jg}

+ 4 - 0
tihuan_gangjin.py

@@ -0,0 +1,4 @@
def tihuan_gangjin(old_bianma, old_mc, old_jg, new_name):
    """Adopt the extracted rebar (钢筋) name only when it looks specific.

    "Specific" means it carries a diameter symbol (Φ/φ/ø) or a steel
    grade letter (Q/q); anything else keeps the original name. Code and
    price are passed through unchanged.
    """
    looks_specific = any(ch in new_name for ch in 'ΦφøQq')
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': new_name if looks_specific else old_mc, 'jg': old_jg}

+ 28 - 0
tihuan_hunningtu.py

@@ -0,0 +1,28 @@
def tihuan_hunningtu(old_bianma, old_mc, old_jg, new_name):
    """Map a concrete grade found in new_name to the standard pumped
    commercial-concrete (泵送商品砼) price-book entry.

    An expansive-concrete ('膨胀') description keeps the original code
    and price but adopts the new name. If a grade marker (C10..C70) is
    found, the code, name and unit price are replaced with the fixed
    entry for that grade; otherwise everything is kept as-is.
    NOTE(review): C65 is absent from the table and the C35..C70 prices
    are all '362' — confirm against the price book.
    """
    if '膨胀' in new_name:
        return {'bianma': old_bianma, 'new_bianma': old_bianma,
                'mc': new_name, 'jg': old_jg}
    # (grade marker, replacement code, unit price) — checked in order,
    # first match wins, same as the original if/elif ladder.
    grade_table = (
        ('C10', '80212101', '329'),
        ('C15', '80212102', '332'),
        ('C20', '80212103', '342'),
        ('C25', '80212104', '350'),
        ('C30', '80212105', '362'),
        ('C35', '80212106', '362'),
        ('C40', '80212107', '362'),
        ('C45', '80212108', '362'),
        ('C50', '80212109', '362'),
        ('C55', '80212110', '362'),
        ('C60', '80212111', '362'),
        ('C70', '80212112', '362'),
    )
    for grade, code, price in grade_table:
        if grade in new_name:
            return {'bianma': old_bianma, 'new_bianma': code,
                    'mc': grade + '泵送商品砼', 'jg': price}
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': old_mc, 'jg': old_jg}

+ 4 - 0
tihuan_juancai.py

@@ -0,0 +1,4 @@
def tihuan_juancai(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted waterproof-membrane (卷材) name.

    '通用' placeholders keep the original name; code and price are
    untouched.
    """
    keep_original = '通用' in new_name
    mc = old_mc if keep_original else new_name
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': mc, 'jg': old_jg}

+ 65 - 0
tihuan_shajiang.py

@@ -0,0 +1,65 @@
def tihuan_shajiang(old_bianma, old_mc, old_jg, new_name):
    """Replace a mortar (砂浆) material line with the mix named in new_name.

    Parameters are the original material code / name / unit price and the
    mortar description extracted from the work text. Returns a dict with
    the original code ('bianma'), replacement code ('new_bianma'),
    name ('mc') and unit price ('jg').

    Bug fix: mix-ratio lookups now test the more specific ratio first.
    Previously plain substring checks made '1:1' match '水泥砂浆1:1.5'
    (and '1:2' match '…1:2.5'), selecting the wrong price-book entry.
    """
    # Only the part before '或' ("or") is considered.
    new_name = new_name.split('或')[0]

    def _keep(mc):
        # Keep the original code and price, possibly with a new name.
        return {'bianma': old_bianma, 'new_bianma': old_bianma,
                'mc': mc, 'jg': old_jg}

    def _swap(code, jg):
        # Re-point the line at a different price-book entry.
        return {'bianma': old_bianma, 'new_bianma': code,
                'mc': new_name, 'jg': jg}

    # Generic / placeholder descriptions: leave the line untouched.
    if any(m in new_name for m in
           ('通用', '清水', '低强度', 'DTG', '素水泥浆', 'DTA')):
        return _keep(old_mc)

    # Strength-grade rows: (grade markers, 8001-series code, price,
    # 8005-series code, price).
    strength_rows = (
        (('Mb10', 'M10', 'Ma10'), '80010106', '191.53', '80050106', '199.56'),
        (('Mb7.5', 'M7.5', 'Ma7.5'), '80010105', '182.23', '80050105', '195.2'),
        (('Mb5', 'M5', 'Ma5'), '80010104', '180.37', '80050104', '193'),
        (('Mb2.5', 'M2.5', 'Ma2.5'), '80010103', '175.72', '80050103', '188.64'),
    )

    if '专用' in new_name:
        # Special-purpose mortar: the replacement series follows the
        # series of the original code (8001... vs 8005...).
        for markers, code1, jg1, code5, jg5 in strength_rows:
            if any(m in new_name for m in markers):
                if old_bianma.startswith('8001'):
                    return _swap(code1, jg1)
                if old_bianma.startswith('8005'):
                    return _swap(code5, jg5)
                return _keep(new_name)
        return _keep(new_name)

    if '水泥砂浆' in new_name:  # cement mortar
        for markers, code1, jg1, _code5, _jg5 in strength_rows:
            if any(m in new_name for m in markers):
                return _swap(code1, jg1)
        # Longer ratios first so '1:1' cannot shadow '1:1.5', nor
        # '1:2' shadow '1:2.5'.
        for ratio, code, jg in (('1:1.5', '80010122', '289.29'),
                                ('1:2.5', '80010124', '265.07'),
                                ('1:1', '80010121', '308.42'),
                                ('1:2', '80010123', '275.64'),
                                ('1:3', '80010125', '239.65')):
            if ratio in new_name:
                return _swap(code, jg)
        return _keep(new_name)

    if '石灰砂浆' in new_name:  # lime mortar
        # Longer ratio first ('1:2.5' before '1:2'), same reasoning.
        # NOTE(review): the 8001... codes for 1:3 / 1:4 look like typos
        # for the 8003... lime-mortar series — values kept as found;
        # confirm against the price book.
        for ratio, code, jg in (('1:2.5', '80030104', '200.91'),
                                ('1:2', '80030103', '206.31'),
                                ('1:3', '80010105', '192.27'),
                                ('1:4', '80010106', '168.51')):
            if ratio in new_name:
                return _swap(code, jg)
        return _keep(new_name)

    return _keep(new_name)

+ 4 - 0
tihuan_shicai.py

@@ -0,0 +1,4 @@
def tihuan_shicai(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted stone (石材) name unless it is the generic
    '通用' placeholder; material code and price are never altered."""
    specific = '通用' not in new_name
    return {'bianma': old_bianma,
            'new_bianma': old_bianma,
            'mc': new_name if specific else old_mc,
            'jg': old_jg}

+ 4 - 0
tihuan_wa.py

@@ -0,0 +1,4 @@
def tihuan_wa(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted roof-tile (瓦) name; a '通用' placeholder
    keeps the original name. Code and price pass through unchanged."""
    mc = new_name
    if '通用' in new_name:
        mc = old_mc
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': mc, 'jg': old_jg}

+ 4 - 0
tihuan_zhuan.py

@@ -0,0 +1,4 @@
def tihuan_zhuan(old_bianma, old_mc, old_jg, new_name):
    """Swap in the extracted brick (砖) name unless the extractor fell
    back to the '通用' placeholder. Code and price are unchanged."""
    use_new = '通用' not in new_name
    return {'bianma': old_bianma, 'new_bianma': old_bianma,
            'mc': new_name if use_new else old_mc, 'jg': old_jg}

+ 24 - 3
util.py

@@ -121,13 +121,20 @@ def huizongrcj(data):
 
 
 def mergerg(rg1, rg2, coef):
-    
+    isolated=[]
     for entry in rg2:
         CLBH = entry['CLBH']
+        hit = False
         for toMerge in rg1:
+            print(toMerge)
             if toMerge['CLBH'] == CLBH:
+                hit = True
                 toMerge['gr'] = float(toMerge['gr']) + coef * float(entry['gr'])
                 toMerge['gf'] = float(toMerge['gf']) + coef * float(entry['gf'])
+        if not hit:
+            isolated.append({'CLBH':entry['CLBH'],'CLMC':entry['CLMC'],'JLDW':entry['JLDW'],'YSJG':float(entry['YSJG']),'gr':float(entry['gr']),'gf':float(entry['gf'])})
+    rg1 = rg1 + isolated
+    return rg1
 def multirg(rg1, coef):
     
     
@@ -137,13 +144,20 @@ def multirg(rg1, coef):
 
 
 def mergejx(rg1, rg2, coef):
-    
+    isolated=[]
     for entry in rg2:
         jxbh = entry['jxbh']
+        hit=False
         for toMerge in rg1:
+            print(toMerge)
             if toMerge['jxbh'] == jxbh:
+                hit=True
                 toMerge['sl'] = float(toMerge['sl']) + coef * float(entry['sl'])
                 toMerge['hj'] = float(toMerge['hj']) + coef * float(entry['hj'])
+        if not hit:
+            isolated.append({'jxbh':entry['jxbh'],'jxmc':entry['jxmc'],'DW':entry['DW'],'tbdj':float(entry['tbdj']),'sl':float(entry['sl']),'hj':float(entry['hj'])})
+    rg1 = rg1 + isolated
+    return rg1
 def multijx(rg1, coef):
     
     
@@ -154,13 +168,20 @@ def multijx(rg1, coef):
 
 
 def mergecl(rg1, rg2, coef):
-    
+    isolated=[]
     for entry in rg2:
         CLBH = entry['CLBH']
+        hit=False
         for toMerge in rg1:
+            print(toMerge)
             if toMerge['CLBH'] == CLBH:
+                hit=True
                 toMerge['SL'] = float(toMerge['SL']) + coef * float(entry['SL'])
                 toMerge['HJ'] = float(toMerge['HJ']) + coef * float(entry['HJ'])
+        if not hit:
+            isolated.append({'CLBH':entry['CLBH'],'CLMC':entry['CLMC'],'JLDW':entry['JLDW'],'YSJG':float(entry['YSJG']),'SL':float(entry['SL']),'HJ':float(entry['HJ'])})
+    rg1 = rg1 + isolated
+    return rg1
 def multicl(rg1, coef):
     
     

Некоторые файлы не были показаны из-за большого количества измененных файлов