dcata004 committed on
Commit
d3bbdc6
Β·
verified Β·
1 Parent(s): bace1b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +88 -35
app.py CHANGED
@@ -1,48 +1,101 @@
1
  import gradio as gr
2
 
3
def audit_check(vendor_name, server_location, model_type):
    """Run a quick simulated compliance audit for a vendor.

    Args:
        vendor_name: Display name of the vendor being audited.
        server_location: Hosting jurisdiction (e.g. "USA", "Switzerland").
        model_type: AI model category string from the UI dropdown.

    Returns:
        A (verdict, logs) tuple: the verdict label and the newline-joined
        audit log lines.
    """
    score = 0
    findings = [f"🔍 Auditing {vendor_name}..."]

    # Jurisdiction check: US or unknown hosting triggers Cloud Act risk.
    if server_location in ("USA", "Unknown"):
        score += 50
        findings.append("❌ CRITICAL: Data hosted in US Jurisdiction (Cloud Act Risk).")
    else:
        findings.append("✅ PASS: Data hosted in Safe Jurisdiction.")

    # Model check: public LLMs require a zero-retention agreement.
    if model_type == "Public LLM (ChatGPT/Claude)":
        score += 30
        findings.append("⚠️ HIGH: Public Model detected. Zero-Retention Agreement required.")
    else:
        findings.append("✅ PASS: Private/Local Model detected.")

    # Map the aggregate score onto a go / caution / no-go verdict.
    if score > 40:
        verdict = "🔴 NO-GO: High Compliance Risk"
    elif score > 20:
        verdict = "🟡 CAUTION: Manual Review Needed"
    else:
        verdict = "🟢 GO: Low Risk / Approved"

    return verdict, "\n".join(findings)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
# The Interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🛡️ Toro Governance Lab: Vendor Risk Engine")
    gr.Markdown("Instant preliminary risk assessment for Swiss/UK Banking Compliance (nFADP / EU AI Act).")

    # Input row: vendor identity plus the two risk-driving dropdowns.
    with gr.Row():
        v_name = gr.Textbox(label="Vendor Name")
        loc = gr.Dropdown(["Switzerland", "EU (Germany/France)", "USA", "Unknown"], label="Server Location")
        model = gr.Dropdown(["Private/Local Model", "Public LLM (ChatGPT/Claude)"], label="AI Model Type")

    btn = gr.Button("Run Audit")

    out_verdict = gr.Label(label="Audit Verdict")
    out_logs = gr.Textbox(label="Audit Logs")

    btn.click(audit_check, inputs=[v_name, loc, model], outputs=[out_verdict, out_logs])

# Guard the launch so importing this module (e.g. in tests or by a host
# runner) does not start the server as a side effect.
if __name__ == "__main__":
    demo.launch()
 
 
1
  import gradio as gr
2
 
3
+ def calculate_risk(data_type, users, location, use_case):
4
+ score = 0
5
+ reasons = []
 
 
 
6
 
7
+ # 1. Data Sensitivity Scoring (nFADP Art. 5)
8
+ if data_type == "Public Data":
9
+ score += 1
10
+ elif data_type == "Internal/Private":
11
+ score += 3
12
+ elif data_type == "Sensitive / Biometric / Medical":
13
+ score += 10
14
+ reasons.append("🚨 **High Risk Data:** Processing sensitive personal data requires explicit consent (nFADP) and strict DPIA (EU AI Act).")
15
 
16
+ # 2. User Volume (Systemic Risk)
17
+ if users == "< 1,000 DAU":
18
+ score += 1
19
+ elif users == "1,000 - 50,000 DAU":
20
+ score += 3
21
+ elif users == "> 50,000 DAU":
22
+ score += 5
23
+ reasons.append("πŸ“ˆ **High Volume:** Systems with >50k users are often classified as 'Systemic Risk' under EU AI Act.")
24
 
25
+ # 3. Server Location (Cross-Border Transfer)
26
+ if location == "Switzerland (CH)":
27
+ score += 0
28
+ reasons.append("βœ… **Sovereign Hosting:** Data resides in Switzerland. nFADP compliant.")
29
+ elif location == "European Union (EU)":
30
+ score += 1
31
+ reasons.append("βœ… **Adequate Protection:** EU is on the Swiss FDPIC 'Safe Country' list.")
32
+ elif location == "USA (Cloud Act Scope)":
33
+ score += 5
34
+ reasons.append("⚠️ **US Cloud Act Risk:** Transfer requires TIA (Transfer Impact Assessment) and SCCs.")
35
+ elif location == "Other / Global":
36
+ score += 7
37
+ reasons.append("🚨 **Unknown Jurisdiction:** High risk of data sovereignty violation.")
38
+
39
+ # Calculate Verdict
40
+ if score >= 12:
41
+ tier = "TIER 4: UNACCEPTABLE / HIGH RISK"
42
+ color = "red"
43
+ action = "πŸ›‘ STOP DEPLOYMENT. Requires full DPIA and Legal Review."
44
+ elif score >= 7:
45
+ tier = "TIER 3: SUBSTANTIAL RISK"
46
+ color = "orange"
47
+ action = "⚠️ PROCEED WITH CAUTION. Implement SCCs and Encryption."
48
  else:
49
+ tier = "TIER 1: LOW RISK"
50
+ color = "green"
51
+ action = "βœ… APPROVED for Pilot. Standard monitoring applies."
52
 
53
+ # Formatted Output
54
+ report = f"""
55
+ ## πŸ›‘οΈ Audit Verdict: <span style='color:{color}'>{tier}</span>
56
+
57
+ **Risk Score:** {score}/20
58
+
59
+ ### πŸ“‹ Compliance Actions Required:
60
+ {action}
61
+
62
+ ### πŸ” Detected Risk Factors:
63
+ """
64
+ for r in reasons:
65
+ report += f"\n- {r}"
66
+
67
+ return report
68
 
69
# --- UI Layout ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Header banner.
    gr.Markdown("""
    # 🇨🇭 Swiss Risk Calculator
    ### nFADP & EU AI Act Compliance Engine
    **Cata Risk Lab** | Zurich • London • Miami
    """)

    with gr.Row():
        # Left column: the three audit inputs and the trigger button.
        with gr.Column():
            gr.Markdown("### 1. Deployment Details")
            data_input = gr.Radio(
                ["Public Data", "Internal/Private", "Sensitive / Biometric / Medical"],
                label="Data Classification (nFADP Art. 5)",
            )
            users_input = gr.Radio(
                ["< 1,000 DAU", "1,000 - 50,000 DAU", "> 50,000 DAU"],
                label="Daily Active Users",
            )
            loc_input = gr.Radio(
                ["Switzerland (CH)", "European Union (EU)", "USA (Cloud Act Scope)", "Other / Global"],
                label="Hosting Jurisdiction",
            )
            btn = gr.Button("🔍 Run Compliance Audit", variant="primary")

        # Right column: the rendered Markdown report.
        with gr.Column():
            gr.Markdown("### 2. Risk Assessment Report")
            output_box = gr.Markdown()

    # NOTE(review): calculate_risk declares a fourth parameter (use_case)
    # but only three inputs are wired here — confirm the function gives it
    # a default value, otherwise clicking raises TypeError.
    btn.click(fn=calculate_risk, inputs=[data_input, users_input, loc_input], outputs=output_box)

if __name__ == "__main__":
    demo.launch()