8 changes: 5 additions & 3 deletions ai/ai_simulations.py
@@ -1,4 +1,5 @@
import random
import logging

class OffensiveSimulation:
def __init__(self):
@@ -12,7 +13,7 @@ def __init__(self):

def simulate_attack(self):
if not self.scenarios:
print("Error: No scenarios available for simulation.")
logging.error("Error: No scenarios available for simulation.")
return

try:
@@ -22,11 +23,12 @@ def simulate_attack(self):
print(f"[SIMULATION] Executing simulated attack: {scenario}")

except IndexError as e:
print(f"Error during simulation: {e}")
logging.error(f"Error during simulation: {e}")

except Exception as e:
print(f"Error during simulation: {e}")
logging.error(f"Error during simulation: {e}")

if __name__ == "__main__":
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')
simulation = OffensiveSimulation()
simulation.simulate_attack()
30 changes: 23 additions & 7 deletions app_security/app_vulnerability_scanner.py
@@ -3,20 +3,36 @@
from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker
import time
import logging

DATABASE_URL = "sqlite:///document_analysis.db"
engine = create_engine(DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

# Configure logging
logging.basicConfig(level=logging.ERROR, format='%(asctime)s - %(levelname)s - %(message)s')

def scan_application(app_url):
print(f"Scanning application for vulnerabilities: {app_url}")

# Input validation for app_url
if not isinstance(app_url, str) or not app_url.startswith("http"):
logging.error("Invalid app_url provided.")
return {"vulnerabilities_found": 0, "critical_issues": []}

retries = 3
for attempt in range(retries):
try:
session = SessionLocal()
try:
response = requests.get(app_url)
response.raise_for_status()

# Simulate a potential SQL injection vulnerability fix
if "vulnerable_param" in app_url:
logging.error("Potential SQL injection attempt detected.")
return {"vulnerabilities_found": 0, "critical_issues": ["Potential SQL Injection attempt detected."]}

vulnerabilities = {"vulnerabilities_found": 2, "critical_issues": ["SQL Injection", "XSS"]}

# Save scan results to the database
@@ -30,7 +46,7 @@ def scan_application(app_url):
session.commit()
return vulnerabilities
except requests.exceptions.HTTPError as http_err:
print(f"HTTP error occurred: {http_err}")
logging.error(f"HTTP error occurred: {http_err}")
scan_result = DocumentAnalysis(
source=app_url,
title="Vulnerability Scan",
@@ -40,7 +56,7 @@
session.add(scan_result)
session.commit()
except Exception as err:
print(f"Other error occurred: {err}")
logging.error(f"Other error occurred: {err}")
scan_result = DocumentAnalysis(
source=app_url,
title="Vulnerability Scan",
@@ -52,12 +68,12 @@ def scan_application(app_url):
finally:
session.close()
except Exception as db_err:
print(f"Database connection error: {db_err}")
logging.error(f"Database connection error: {db_err}")
if attempt < retries - 1:
print("Retrying database connection...")
logging.error("Retrying database connection...")
time.sleep(2)
else:
print("Failed to connect to the database after multiple attempts.")
logging.error("Failed to connect to the database after multiple attempts.")
return {"vulnerabilities_found": 0, "critical_issues": []}
return {"vulnerabilities_found": 0, "critical_issues": []}

@@ -66,9 +82,9 @@ def verify_database_connection():
session = SessionLocal()
        session.execute(text('SELECT 1'))
session.close()
print("Database connection verified.")
logging.info("Database connection verified.")
except Exception as e:
print(f"Database connection verification failed: {e}")
logging.error(f"Database connection verification failed: {e}")

if __name__ == "__main__":
verify_database_connection()
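For context, a minimal sketch of how the hardened scanner might be exercised locally; the import path, the localhost URL, and the printed keys are illustrative assumptions drawn from the diff above, not part of the PR.

# Illustrative smoke test only; the target URL is a placeholder.
from app_security.app_vulnerability_scanner import scan_application, verify_database_connection

verify_database_connection()
result = scan_application("http://localhost:8000/app")
# Every path above (validation failure, HTTP error, exhausted DB retries)
# is expected to return a dict with these two keys.
print(result["vulnerabilities_found"], result["critical_issues"])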
36 changes: 23 additions & 13 deletions backend/ai_chat.py
@@ -1,71 +1,81 @@
import openai
import requests
import os
import logging
from backend.code_parser import CodeParser
from backend.pipeline_manager import PipelineManager

class MultiAIChat:
def __init__(self, openai_key, huggingface_key, anthropic_key):
self.openai_key = openai_key
self.huggingface_key = huggingface_key
self.anthropic_key = anthropic_key
def __init__(self):
self.openai_key = os.getenv("OPENAI_API_KEY")
self.huggingface_key = os.getenv("HUGGINGFACE_API_KEY")
self.anthropic_key = os.getenv("ANTHROPIC_API_KEY")
self.code_parser = CodeParser("")
self.pipeline_manager = PipelineManager()

def openai_chat(self, prompt):
if not self.openai_key:
print("Error: Missing OpenAI API key")
logging.error("Error: Missing OpenAI API key")
return ""
try:
openai.api_key = self.openai_key
response = openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=100)
return response.choices[0].text.strip()
except Exception as e:
print(f"Error during OpenAI chat: {e}")
logging.error(f"Error during OpenAI chat: {e}")
return ""

def huggingface_chat(self, prompt):
if not self.huggingface_key:
print("Error: Missing HuggingFace API key")
logging.error("Error: Missing HuggingFace API key")
return ""
try:
url = "https://api-inference.huggingface.co/models/facebook/blenderbot-400M-distill"
headers = {"Authorization": f"Bearer {self.huggingface_key}"}
response = requests.post(url, json={"inputs": prompt}, headers=headers)
response.raise_for_status()
return response.json().get("generated_text", "")
except requests.exceptions.HTTPError as e:
logging.error(f"HTTP error during HuggingFace chat: {e}")
return ""
except Exception as e:
print(f"Error during HuggingFace chat: {e}")
logging.error(f"Error during HuggingFace chat: {e}")
return ""

def anthropic_chat(self, prompt):
if not self.anthropic_key:
print("Error: Missing Anthropic API key")
logging.error("Error: Missing Anthropic API key")
return ""
try:
url = "https://api.anthropic.com/v1/completion"
headers = {"Authorization": f"Bearer {self.anthropic_key}"}
response = requests.post(url, json={"prompt": prompt, "model": "claude-v1"})
response.raise_for_status()
return response.json().get("output", "")
except requests.exceptions.HTTPError as e:
logging.error(f"HTTP error during Anthropic chat: {e}")
return ""
except Exception as e:
print(f"Error during Anthropic chat: {e}")
logging.error(f"Error during Anthropic chat: {e}")
return ""

def parse_code(self, code):
try:
self.code_parser = CodeParser(code)
return self.code_parser.analyze_code()
except Exception as e:
print(f"Error during code parsing: {e}")
logging.error(f"Error during code parsing: {e}")
return {}

def manage_pipeline(self, task):
try:
return self.pipeline_manager.autogpt_task(task)
except Exception as e:
print(f"Error during pipeline management: {e}")
logging.error(f"Error during pipeline management: {e}")
return ""

if __name__ == "__main__":
chat = MultiAIChat("openai_key", "huggingface_key", "anthropic_key")
chat = MultiAIChat()
print(chat.openai_chat("Hello, how can I assist you today?"))
print(chat.parse_code("def example():\n return True"))
print(chat.manage_pipeline("Generate a weekly report."))
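Because MultiAIChat now reads its keys from the environment rather than constructor arguments, a quick local check could look like the sketch below; the placeholder key values and the in-process environment setup are assumptions for illustration only.

# Illustrative only: provide placeholder keys before constructing the client.
import os

os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")
os.environ.setdefault("HUGGINGFACE_API_KEY", "hf-placeholder")
os.environ.setdefault("ANTHROPIC_API_KEY", "anthropic-placeholder")

from backend.ai_chat import MultiAIChat

chat = MultiAIChat()
print(chat.openai_chat("Hello, how can I assist you today?"))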
24 changes: 16 additions & 8 deletions backend/code_parser.py
@@ -28,16 +28,24 @@ def __init__(self, code):
raise

def find_functions(self):
return [node.name for node in ast.walk(self.tree) if isinstance(node, ast.FunctionDef)]
try:
return [node.name for node in ast.walk(self.tree) if isinstance(node, ast.FunctionDef)]
except Exception as e:
logging.error(f"Unexpected error in find_functions: {e}")
return []

def analyze_code(self):
if not self.tree.body:
return {"error": "Empty code input"}
analysis = {
"num_functions": len(self.find_functions()),
"lines_of_code": len(self.tree.body),
}
return analysis
try:
if not self.tree.body:
return {"error": "Empty code input"}
analysis = {
"num_functions": len(self.find_functions()),
"lines_of_code": len(self.tree.body),
}
return analysis
except Exception as e:
logging.error(f"Unexpected error in analyze_code: {e}")
return {"error": "Analysis failed"}

def save_analysis_to_db(self, source, title, links, error):
session = SessionLocal()
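A small usage sketch of the guarded parser; the sample snippet and the expected outputs in the comments follow from the methods shown above and are illustrative, not asserted by the PR.

# Illustrative only: exercise the guarded analysis methods on a tiny snippet.
from backend.code_parser import CodeParser

parser = CodeParser("def example():\n    return True\n")
print(parser.find_functions())  # ['example']
print(parser.analyze_code())    # {'num_functions': 1, 'lines_of_code': 1}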
5 changes: 3 additions & 2 deletions backend/pipeline_manager.py
@@ -4,6 +4,7 @@
from database.models import DocumentAnalysis
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import os

DATABASE_URL = "sqlite:///document_analysis.db"
engine = create_engine(DATABASE_URL)
@@ -18,7 +19,7 @@ def __init__(self):

def autogpt_task(self, task):
try:
api_key = "YOUR_API_KEY"
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
raise ValueError("Missing API key")
openai.api_key = api_key
@@ -43,7 +44,7 @@ def pinocchio_fact_check(self, text):
url = "https://factchecktools.googleapis.com/v1alpha1/claims:search"
params = {
"query": text,
"key": "YOUR_API_KEY"
"key": os.getenv("FACT_CHECK_API_KEY")
}
response = requests.get(url, params=params)
response.raise_for_status()
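Similarly, the pipeline manager now expects OPENAI_API_KEY and FACT_CHECK_API_KEY in the environment; a minimal sketch follows, with placeholder key values, an arbitrary sample task, and a sample claim, none of which come from the PR itself.

# Illustrative only: placeholder keys plus a sample task and claim.
import os

os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")
os.environ.setdefault("FACT_CHECK_API_KEY", "factcheck-placeholder")

from backend.pipeline_manager import PipelineManager

manager = PipelineManager()
print(manager.autogpt_task("Summarize this week's scan results."))
print(manager.pinocchio_fact_check("The Eiffel Tower is in Paris."))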