diff --git a/main.py b/main.py
index 5f10a96..8272e02 100644
--- a/main.py
+++ b/main.py
@@ -93,15 +93,23 @@ class UMLGeneratorApp(ctk.CTk):
             value="java"
         )
         self.java_radio.grid(row=0, column=0, padx=20, pady=10)
-
+
         self.csharp_radio = ctk.CTkRadioButton(
             self.options_frame,
-            text="C#", 
+            text="C#",
             variable=self.language_var,
             value="csharp"
         )
         self.csharp_radio.grid(row=0, column=1, padx=20, pady=10)

+        self.php_radio = ctk.CTkRadioButton(
+            self.options_frame,
+            text="PHP",
+            variable=self.language_var,
+            value="php"
+        )
+        self.php_radio.grid(row=0, column=2, padx=20, pady=10)
+
         self.generate_classes = tk.BooleanVar(value=True)
         self.classes_check = ctk.CTkCheckBox(
@@ -299,7 +307,7 @@ class UMLGeneratorApp(ctk.CTk):
         self.generate_project_button.grid(row=2, column=0, columnspan=2, pady=20)

     def log_message(self, message):
-        """Ajoute un message dans la zone de log avec auto-scroll"""
+
         self.log_text.insert(tk.END, message + "\n")
         self.log_text.see(tk.END)
         self.update_idletasks()
@@ -313,7 +321,7 @@ class UMLGeneratorApp(ctk.CTk):
             self.project_path.insert(0, directory)

     def preview_and_generate_uml(self, classes, output_dir):
-        """Prévisualise et génère le diagramme UML si validé"""
+
         from src.uml_generator import UMLGenerator
         import tempfile

@@ -333,7 +341,7 @@ class UMLGeneratorApp(ctk.CTk):
             return None

     def preview_and_generate_readme(self, project_info, output_dir):
-        """Prévisualise et génère le README si validé"""
+
         from src.readme_generator import ReadmeGenerator

@@ -393,9 +401,12 @@ class UMLGeneratorApp(ctk.CTk):
             if self.language_var.get() == "java":
                 from src.code_analyzer import JavaAnalyzer
                 analyzer = JavaAnalyzer()
-            else:
+            elif self.language_var.get() == "csharp":
                 from src.code_analyzer import CSharpAnalyzer
                 analyzer = CSharpAnalyzer()
+            elif self.language_var.get() == "php":
+                from src.code_analyzer import PHPAnalyzer
+                analyzer = PHPAnalyzer()

             self.log_message("🔍 Analyse du code source en cours...")

@@ -410,6 +421,22 @@ class UMLGeneratorApp(ctk.CTk):
                     self.log_message(f"   ↳ Hérite de: {cls['extends']}")
                 if cls['implements']:
                     self.log_message(f"   ↳ Implémente: {', '.join(cls['implements'])}")
+
+
+                if 'associations' in cls and cls['associations']:
+                    self.log_message(f"   ↳ Associations: {', '.join(cls['associations'])}")
+
+
+                if 'dependencies' in cls and cls['dependencies']:
+                    self.log_message(f"   ↳ Dépendances: {', '.join(cls['dependencies'])}")
+
+
+                if 'aggregations' in cls and cls['aggregations']:
+                    self.log_message(f"   ↳ Agrégations: {', '.join(cls['aggregations'])}")
+
+
+                if 'compositions' in cls and cls['compositions']:
+                    self.log_message(f"   ↳ Compositions: {', '.join(cls['compositions'])}")

             if self.generate_classes.get():
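The new log lines in `main.py` assume each `cls` dict returned by the analyzers carries four extra relationship lists. A minimal sketch of that shape and of how the log entries render (the sample values are hypothetical, not taken from the PR):

```python
# Hypothetical class_info record, shaped like the analyzers below produce.
cls = {
    'name': 'OrderService',
    'extends': 'BaseService',
    'implements': ['Loggable'],
    'associations': ['Customer'],     # attribute typed with another project class
    'dependencies': ['EmailClient'],  # class used inside a method body
    'aggregations': [],
    'compositions': [],
}

# Same guard-and-join pattern as the added log_message calls.
for kind, label in [('associations', 'Associations'),
                    ('dependencies', 'Dépendances'),
                    ('aggregations', 'Agrégations'),
                    ('compositions', 'Compositions')]:
    if cls.get(kind):
        print(f"   ↳ {label}: {', '.join(cls[kind])}")
```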
diff --git a/src/code_analyzer.py b/src/code_analyzer.py
index 3242ae4..cdb97e6 100644
--- a/src/code_analyzer.py
+++ b/src/code_analyzer.py
@@ -19,20 +19,46 @@ class CodeAnalyzer(ABC):
 class JavaAnalyzer(CodeAnalyzer):
     def __init__(self):
         self.classes = []
+        self.class_names = set()
         self.relationships = []

     def analyze_directory(self, directory_path):
+
+        self._collect_class_names(directory_path)
+
+
+        return self._analyze_classes_and_generate_uml(directory_path)
+
+    def _collect_class_names(self, directory_path):
+
+        for root, _, files in os.walk(directory_path):
+            for file in files:
+                if file.endswith('.java'):
+                    try:
+                        with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
+                            content = f.read()
+
+                            tree = javalang.parse.parse(content)
+                            for path, node in tree.filter(javalang.tree.ClassDeclaration):
+                                self.class_names.add(node.name)
+                    except Exception as e:
+                        print(f"Erreur lors de la collecte des classes dans {file}: {str(e)}")
+
+    def _analyze_classes_and_generate_uml(self, directory_path):
+
         for root, _, files in os.walk(directory_path):
             for file in files:
                 if file.endswith('.java'):
                     self._analyze_file(os.path.join(root, file))
-        return self.classes, self.relationships
+
+        return self.classes, self.get_relationships()

     def _analyze_file(self, file_path):
+
         try:
             with open(file_path, 'r', encoding='utf-8') as file:
                 content = file.read()
-
+
                 tree = javalang.parse.parse(content)
                 for path, node in tree.filter(javalang.tree.ClassDeclaration):
                     class_info = {
@@ -40,10 +66,14 @@ class JavaAnalyzer(CodeAnalyzer):
                         'methods': [],
                         'attributes': [],
                         'extends': node.extends.name if node.extends else None,
-                        'implements': [impl.name for impl in node.implements] if node.implements else []
+                        'implements': [impl.name for impl in node.implements] if node.implements else [],
+                        'associations': [],
+                        'dependencies': [],
+                        'aggregations': [],
+                        'compositions': []
                     }
+
-                    # Analyse des méthodes
                     for method in node.methods:
                         method_info = {
                             'name': method.name,
@@ -51,8 +81,14 @@ class JavaAnalyzer(CodeAnalyzer):
                             'parameters': [(param.type.name, param.name) for param in method.parameters]
                         }
                         class_info['methods'].append(method_info)
-
-                    # Analyse des attributs
+
+
+                        for statement in method.body:
+                            if isinstance(statement, javalang.tree.MethodInvocation):
+                                called_class = statement.qualifier.name if statement.qualifier else None
+                                if called_class and called_class in self.class_names:
+                                    class_info['dependencies'].append(called_class)
+
                     for field in node.fields:
                         for declarator in field.declarators:
                             attribute_info = {
@@ -60,8 +96,14 @@ class JavaAnalyzer(CodeAnalyzer):
                                 'name': declarator.name,
                                 'type': field.type.name
                             }
                             class_info['attributes'].append(attribute_info)
-
+
+
+                            attribute_type = field.type.name
+                            if attribute_type not in ['int', 'double', 'String'] and attribute_type in self.class_names:
+                                class_info['associations'].append(attribute_type)
+
                     self.classes.append(class_info)
+
         except Exception as e:
             print(f"Erreur lors de l'analyse du fichier {file_path}: {str(e)}")
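The Java pass does all of its work through `javalang`'s AST, using the same `parse` and `filter` calls as above. A standalone sketch of the field-type check behind the association detection, run on an inline sample (the sample classes are invented; assumes `pip install javalang`):

```python
# Standalone sketch of the javalang calls used by JavaAnalyzer (sample source is invented).
import javalang

source = """
class Customer {}
class Order {
    Customer buyer;
    int total;
    void notify(Mailer mailer) { mailer.send(); }
}
"""

tree = javalang.parse.parse(source)
# First pass: collect the names of classes defined in the project itself.
project_classes = {node.name for _, node in tree.filter(javalang.tree.ClassDeclaration)}

# Second pass: a field typed with another project class is a candidate association.
for _, node in tree.filter(javalang.tree.ClassDeclaration):
    for field in node.fields:
        if field.type.name in project_classes:
            print(f"{node.name} --> {field.type.name}  (association)")
```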
@@ -69,45 +111,110 @@
         return self.classes

     def get_relationships(self):
-        return self.relationships
+        uml_content = ""
+        for class_info in self.classes:
+
+            if class_info['extends'] and class_info['extends'] in self.class_names:
+                uml_content += f"{class_info['extends']} <|-- {class_info['name']}\n"
+
+
+            for interface in class_info['implements']:
+                if interface in self.class_names:
+                    uml_content += f"{interface} <|.. {class_info['name']}\n"
+
+
+            if class_info['associations']:
+                for assoc_class in class_info['associations']:
+                    uml_content += f"{class_info['name']} --> {assoc_class}\n"
+
+
+            if class_info['dependencies']:
+                for dep_class in class_info['dependencies']:
+                    uml_content += f"{class_info['name']} ..> {dep_class}\n"
+
+
+            if class_info['aggregations']:
+                for agg_class in class_info['aggregations']:
+                    uml_content += f"{class_info['name']} o--> {agg_class}\n"
+
+
+            if class_info['compositions']:
+                for comp_class in class_info['compositions']:
+                    uml_content += f"{class_info['name']} *--> {comp_class}\n"
+
+        return uml_content
+
 class CSharpAnalyzer(CodeAnalyzer):
     def __init__(self):
         self.classes = []
+        self.class_names = set()
         self.relationships = []

     def analyze_directory(self, directory_path):
+
+        self._collect_class_names(directory_path)
+
+
+        return self._analyze_classes_and_generate_uml(directory_path)
+
+    def _collect_class_names(self, directory_path):
+
+        for root, _, files in os.walk(directory_path):
+            for file in files:
+                if file.endswith('.cs'):
+                    try:
+                        with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
+                            content = f.read()
+
+
+                            class_pattern = r'class\s+(\w+)(?:\s*:\s*(\w+))?'
+                            for match in re.finditer(class_pattern, content):
+                                class_name = match.group(1)
+                                self.class_names.add(class_name)
+                    except Exception as e:
+                        print(f"Erreur lors de la collecte des classes dans {file}: {str(e)}")
+
+    def _analyze_classes_and_generate_uml(self, directory_path):
+
         for root, _, files in os.walk(directory_path):
             for file in files:
                 if file.endswith('.cs'):
                     self._analyze_file(os.path.join(root, file))
-        return self.classes, self.relationships
+
+        return self.classes, self.get_relationships()

     def _analyze_file(self, file_path):
+
         try:
             with open(file_path, 'r', encoding='utf-8') as file:
                 content = file.read()
+
-                # Analyse basique avec regex pour C#
-                # Note: Une analyse plus robuste nécessiterait un parser C# complet
                 class_pattern = r'class\s+(\w+)(?:\s*:\s*(\w+))?'
                 method_pattern = r'(?:public|private|protected)\s+(?:static\s+)?(\w+)\s+(\w+)\s*\((.*?)\)'
                 property_pattern = r'(?:public|private|protected)\s+(\w+)\s+(\w+)\s*{\s*get;\s*set;\s*}'
-
-                # Recherche des classes
+
                 for match in re.finditer(class_pattern, content):
                     class_name = match.group(1)
                     base_class = match.group(2)
-
+
                     class_info = {
                         'name': class_name,
                         'methods': [],
                         'attributes': [],
                         'extends': base_class,
-                        'implements': []
+                        'implements': [],
+                        'associations': [],
+                        'dependencies': [],
+                        'aggregations': [],
+                        'compositions': []
                     }
+
+
+                    self.class_names.add(class_name)
+
-                    # Recherche des méthodes
                     for method_match in re.finditer(method_pattern, content):
                         method_info = {
                             'name': method_match.group(2),
@@ -115,16 +222,162 @@ class CSharpAnalyzer(CodeAnalyzer):
                             'parameters': method_match.group(3).split(',') if method_match.group(3) else []
                         }
                         class_info['methods'].append(method_info)
-
-                    # Recherche des propriétés
+
                     for prop_match in re.finditer(property_pattern, content):
                         attribute_info = {
                             'name': prop_match.group(2),
                             'type': prop_match.group(1)
                         }
                         class_info['attributes'].append(attribute_info)
+
+
+                        attribute_type = prop_match.group(1)
+                        if attribute_type not in ['int', 'double', 'string'] and attribute_type in self.class_names:
+                            class_info['associations'].append(attribute_type)
+
+                    self.classes.append(class_info)
+
+        except Exception as e:
+            print(f"Erreur lors de l'analyse du fichier {file_path}: {str(e)}")
+
+    def get_classes(self):
+        return self.classes
+
+    def get_relationships(self):
+        uml_content = ""
+        for class_info in self.classes:
+
+            if class_info['extends'] and class_info['extends'] in self.class_names:
+                uml_content += f"{class_info['extends']} <|-- {class_info['name']}\n"
+
+
+            for interface in class_info['implements']:
+                if interface in self.class_names:
+                    uml_content += f"{interface} <|.. {class_info['name']}\n"
+
+
+            if class_info['associations']:
+                for assoc_class in class_info['associations']:
+                    uml_content += f"{class_info['name']} --> {assoc_class}\n"
+
+
+            if class_info['dependencies']:
+                for dep_class in class_info['dependencies']:
+                    uml_content += f"{class_info['name']} ..> {dep_class}\n"
+
+
+            if class_info['aggregations']:
+                for agg_class in class_info['aggregations']:
+                    uml_content += f"{class_info['name']} o--> {agg_class}\n"
+
+
+            if class_info['compositions']:
+                for comp_class in class_info['compositions']:
+                    uml_content += f"{class_info['name']} *--> {comp_class}\n"
+
+        return uml_content
+
+class PHPAnalyzer(CodeAnalyzer):
+    def __init__(self):
+        self.classes = []
+        self.class_names = set()
+        self.relationships = []
+
+    def analyze_directory(self, directory_path):
+
+        self._collect_class_names(directory_path)
+
+
+        return self._analyze_classes_and_generate_uml(directory_path)
+
+    def _collect_class_names(self, directory_path):
+
+        for root, _, files in os.walk(directory_path):
+            for file in files:
+                if file.endswith('.php'):
+                    try:
+                        with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
+                            content = f.read()
+
+
+                            class_pattern = r'class\s+(\w+)(?:\s+extends\s+(\w+))?(?:\s+implements\s+([^{]+))?'
+                            for match in re.finditer(class_pattern, content):
+                                class_name = match.group(1)
+                                self.class_names.add(class_name)
+                    except Exception as e:
+                        print(f"Erreur lors de la collecte des classes dans {file}: {str(e)}")
+
+    def _analyze_classes_and_generate_uml(self, directory_path):
+
+        for root, _, files in os.walk(directory_path):
+            for file in files:
+                if file.endswith('.php'):
+                    self._analyze_file(os.path.join(root, file))
+
+        return self.classes, self.get_relationships()
+
+    def _analyze_file(self, file_path):
+
+        try:
+            with open(file_path, 'r', encoding='utf-8') as file:
+                content = file.read()
+
+
+                class_pattern = r'class\s+(\w+)(?:\s+extends\s+(\w+))?(?:\s+implements\s+([^{]+))?'
+                method_pattern = r'(?:public|private|protected)?\s+function\s+(\w+)\s*\((.*?)\)(?:\s*:\s*(\??\w+))?'
+                property_pattern = r'(?:public|private|protected)\s+\$(\w+)(?:\s*:\s*(\w+))?'
+
+                for class_match in re.finditer(class_pattern, content):
+                    class_name = class_match.group(1)
+                    base_class = class_match.group(2)
+                    implements = class_match.group(3).split(',') if class_match.group(3) else []
+                    implements = [i.strip() for i in implements]
+
+                    class_info = {
+                        'name': class_name,
+                        'methods': [],
+                        'attributes': [],
+                        'extends': base_class,
+                        'implements': implements,
+                        'associations': [],
+                        'dependencies': [],
+                        'aggregations': [],
+                        'compositions': []
+                    }
+
+
+                    for method_match in re.finditer(method_pattern, content):
+                        method_info = {
+                            'name': method_match.group(1),
+                            'parameters': [param.strip() for param in method_match.group(2).split(',') if param.strip()],
+                            'return_type': method_match.group(3) if method_match.group(3) else 'void'
+                        }
+                        class_info['methods'].append(method_info)
+
+
+                        method_content = content[method_match.end():content.find('}', method_match.end())]
+                        for class_name in self.class_names:
+                            if f'new {class_name}' in method_content:
+                                if class_name not in class_info['dependencies']:
+                                    class_info['dependencies'].append(class_name)
+
+                    for prop_match in re.finditer(property_pattern, content):
+                        prop_name = prop_match.group(1)
+                        prop_type = prop_match.group(2) if prop_match.group(2) else 'mixed'
+
+                        attribute_info = {
+                            'name': prop_name,
+                            'type': prop_type
+                        }
+                        class_info['attributes'].append(attribute_info)
+
+
+                        if prop_type in self.class_names:
+                            class_info['associations'].append(prop_type)
+
                     self.classes.append(class_info)
+
         except Exception as e:
             print(f"Erreur lors de l'analyse du fichier {file_path}: {str(e)}")
@@ -132,4 +385,35 @@ class CSharpAnalyzer(CodeAnalyzer):
     def get_classes(self):
         return self.classes

     def get_relationships(self):
-        return self.relationships
+        uml_content = ""
+        for class_info in self.classes:
+
+            if class_info['extends'] and class_info['extends'] in self.class_names:
+                uml_content += f"{class_info['extends']} <|-- {class_info['name']}\n"
+
+
+            for interface in class_info['implements']:
+                if interface in self.class_names:
+                    uml_content += f"{interface} <|.. {class_info['name']}\n"
+
+
+            if class_info['associations']:
+                for assoc_class in set(class_info['associations']):
+                    uml_content += f"{class_info['name']} --> {assoc_class}\n"
+
+
+            if class_info['dependencies']:
+                for dep_class in set(class_info['dependencies']):
+                    uml_content += f"{class_info['name']} ..> {dep_class}\n"
+
+
+            if class_info['aggregations']:
+                for agg_class in set(class_info['aggregations']):
+                    uml_content += f"{class_info['name']} o--> {agg_class}\n"
+
+
+            if class_info['compositions']:
+                for comp_class in set(class_info['compositions']):
+                    uml_content += f"{class_info['name']} *--> {comp_class}\n"
+
+        return uml_content
\ No newline at end of file
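Both the C# and PHP analyzers lean entirely on the class-declaration regexes above. A quick standalone check of what those patterns capture (the source snippets being matched are invented samples):

```python
# Quick check of the class-declaration regexes used by CSharpAnalyzer and PHPAnalyzer
# (the snippets being matched are invented samples).
import re

csharp_pattern = r'class\s+(\w+)(?:\s*:\s*(\w+))?'
php_pattern = r'class\s+(\w+)(?:\s+extends\s+(\w+))?(?:\s+implements\s+([^{]+))?'

csharp_src = "public class Invoice : Document { }"
php_src = "<?php class Invoice extends Document implements Printable, Serializable { }"

m = re.search(csharp_pattern, csharp_src)
print(m.group(1), m.group(2))  # Invoice Document

m = re.search(php_pattern, php_src)
print(m.group(1), m.group(2), [i.strip() for i in m.group(3).split(',')])
# Invoice Document ['Printable', 'Serializable']
```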
diff --git a/src/preview_window.py b/src/preview_window.py
index 58e160d..81da608 100644
--- a/src/preview_window.py
+++ b/src/preview_window.py
@@ -7,24 +7,24 @@ class PreviewWindow(ctk.CTkToplevel):
     def __init__(self, parent, content_type="readme", content=None, image_path=None):
         super().__init__(parent)

-        # Configuration de la fenêtre
+
         self.title("Prévisualisation")
         self.geometry("800x600")

-        # Variables
+
         self.result = False

-        # Configuration de la grille principale
+
         self.grid_columnconfigure(0, weight=1)
         self.grid_rowconfigure(0, weight=1)

-        # Frame principal
+
         self.main_frame = ctk.CTkFrame(self)
         self.main_frame.grid(row=0, column=0, padx=20, pady=20, sticky="nsew")
         self.main_frame.grid_columnconfigure(0, weight=1)
         self.main_frame.grid_rowconfigure(0, weight=1)

-        # Zone de prévisualisation
+
         if content_type == "readme":
             self.preview_text = ctk.CTkTextbox(
                 self.main_frame,
@@ -34,19 +34,19 @@ class PreviewWindow(ctk.CTkToplevel):
             self.preview_text.grid(row=0, column=0, padx=10, pady=10, sticky="nsew")
             self.preview_text.insert("1.0", content)
             self.preview_text.configure(state="disabled")
-        else:  # UML
+        else:
             self.preview_frame = ctk.CTkFrame(self.main_frame)
             self.preview_frame.grid(row=0, column=0, padx=10, pady=10, sticky="nsew")

             if os.path.exists(image_path):
-                # Charger et afficher l'image
+
                 image = Image.open(image_path)

-                # Calculer les dimensions pour l'ajustement
-                frame_width = 760  # 800 - 2*20 (padding)
-                frame_height = 520  # 600 - 2*20 (padding) - 40 (boutons)
-                # Redimensionner l'image en conservant les proportions
+                frame_width = 760
+                frame_height = 520
+
+
                 ratio = min(frame_width/image.width, frame_height/image.height)
                 new_width = int(image.width * ratio)
                 new_height = int(image.height * ratio)
@@ -60,7 +60,7 @@ class PreviewWindow(ctk.CTkToplevel):
             )
             self.image_label.grid(row=0, column=0, padx=10, pady=10)
             self.image_label.configure(image=photo)
-            self.image_label.image = photo  # Garder une référence
+            self.image_label.image = photo
         else:
             self.error_label = ctk.CTkLabel(
                 self.preview_frame,
@@ -69,12 +69,12 @@ class PreviewWindow(ctk.CTkToplevel):
             )
             self.error_label.grid(row=0, column=0, padx=10, pady=10)

-        # Frame pour les boutons
+
         self.button_frame = ctk.CTkFrame(self.main_frame)
         self.button_frame.grid(row=1, column=0, padx=10, pady=10, sticky="ew")
         self.button_frame.grid_columnconfigure((0, 1), weight=1)

-        # Boutons
+
         self.validate_button = ctk.CTkButton(
             self.button_frame,
             text="Valider",
@@ -91,7 +91,7 @@ class PreviewWindow(ctk.CTkToplevel):
         )
         self.cancel_button.grid(row=0, column=1, padx=10, pady=5)

-        # Rendre la fenêtre modale
+
         self.transient(parent)
         self.grab_set()
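The preview keeps the generated diagram inside a 760×520 area by scaling with the smaller of the two width/height ratios, as in the context lines above. A tiny standalone sketch of that computation (the sample image size is hypothetical):

```python
# Minimal sketch of the aspect-ratio fit used by the preview (sample image size is hypothetical).
frame_width, frame_height = 760, 520
image_width, image_height = 1920, 1080

ratio = min(frame_width / image_width, frame_height / image_height)
new_size = (int(image_width * ratio), int(image_height * ratio))
print(new_size)  # (760, 427): width-bound, proportions preserved
```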
diff --git a/src/project_analyzer.py b/src/project_analyzer.py
index 3027f96..66d0e0f 100644
--- a/src/project_analyzer.py
+++ b/src/project_analyzer.py
@@ -30,7 +30,7 @@ class ProjectAnalyzer:
             'dotnet': ['.csproj', '.sln']
         }

-        # Frameworks et bibliothèques spécifiques à détecter dans les fichiers
+
         self.framework_patterns = {
             'django': ['django'],
             'flask': ['flask'],
@@ -53,27 +53,27 @@ class ProjectAnalyzer:
         }

     def analyze_technologies(self, project_path):
-        """Analyse les technologies utilisées dans le projet"""
+
         technologies = defaultdict(int)
         frameworks = defaultdict(int)

-        # Parcourir tous les fichiers du projet
+
         for root, _, files in os.walk(project_path):
-            if '.git' in root:  # Ignorer le dossier .git
+            if '.git' in root:
                 continue

             for file in files:
                 file_path = os.path.join(root, file)

-                # Détecter les technologies par extension/nom de fichier
+
                 for tech, patterns in self.tech_patterns.items():
                     for pattern in patterns:
                         if file.endswith(pattern) or file == pattern:
                             technologies[tech] += 1

-                # Analyser le contenu des fichiers pour détecter les frameworks
+
                 try:
-                    if os.path.getsize(file_path) < 1000000:  # Limiter aux fichiers < 1MB
+                    if os.path.getsize(file_path) < 1000000:
                         with open(file_path, 'r', encoding='utf-8') as f:
                             content = f.read().lower()
                             for framework, patterns in self.framework_patterns.items():
@@ -83,22 +83,22 @@ class ProjectAnalyzer:
                 except (UnicodeDecodeError, IOError):
                     continue

-        # Filtrer les technologies et frameworks les plus utilisés
+
         significant_technologies = {k: v for k, v in technologies.items() if v > 0}
         significant_frameworks = {k: v for k, v in frameworks.items() if v > 0}

-        # Formater les résultats
+
         tech_list = []

-        # Ajouter les langages principaux
+
         for tech, count in significant_technologies.items():
             tech_list.append(f"{tech.capitalize()} - Langage/Technologie principale")

-        # Ajouter les frameworks
+
         for framework, count in significant_frameworks.items():
             tech_list.append(f"{framework.capitalize()} - Framework/Bibliothèque")

-        # Ajouter les outils de build/gestion de dépendances
+
         build_tools = {'maven', 'gradle', 'npm', 'pip', 'dotnet'}
         for tool in build_tools:
             if tool in significant_technologies:
@@ -107,13 +107,13 @@ class ProjectAnalyzer:
         return tech_list

     def get_git_authors(self, project_path):
-        """Récupère les auteurs depuis l'historique Git avec leurs contributions"""
+
         try:
-            # Vérifier si le projet est un dépôt git
+
             if not os.path.exists(os.path.join(project_path, '.git')):
                 return []

-            # Récupérer les auteurs avec leurs contributions
+
             cmd_log = ['git', 'shortlog', '-sne', '--all']
             process = subprocess.Popen(cmd_log,
                                        stdout=subprocess.PIPE,
@@ -125,7 +125,7 @@ class ProjectAnalyzer:
             authors = []
             for line in output.strip().split('\n'):
                 if line.strip():
-                    # Format: "123\tAuthor Name <email>"
+
                     parts = line.strip().split('\t')
                     if len(parts) == 2:
                         commits = parts[0].strip()
@@ -133,7 +133,7 @@ class ProjectAnalyzer:
                         name = author_info[0].strip()
                         email = author_info[1].rstrip('>')

-                        # Récupérer les statistiques de contribution
+
                         cmd_stat = ['git', 'log', '--author=' + email, '--pretty=tformat:', '--numstat']
                         process = subprocess.Popen(cmd_stat,
                                                    stdout=subprocess.PIPE,
@@ -142,7 +142,7 @@ class ProjectAnalyzer:
                                                    text=True)
                         stat_output, _ = process.communicate()

-                        # Calculer les lignes ajoutées/supprimées
+
                         added = 0
                         deleted = 0
                         for stat_line in stat_output.strip().split('\n'):
@@ -156,7 +156,7 @@ class ProjectAnalyzer:
                             except ValueError:
                                 continue

-                        # Récupérer la dernière contribution
+
                         cmd_last = ['git', 'log', '-1', '--format=%ai', f'--author={email}']
                         process = subprocess.Popen(cmd_last,
                                                    stdout=subprocess.PIPE,
@@ -166,7 +166,7 @@ class ProjectAnalyzer:
                         last_date, _ = process.communicate()
                         last_date = last_date.strip()

-                        # Formater la date si disponible
+
                         if last_date:
                             from datetime import datetime
                             date_obj = datetime.strptime(last_date.split()[0], '%Y-%m-%d')
@@ -187,32 +187,32 @@ class ProjectAnalyzer:
             return []

     def get_prerequisites(self, project_path):
-        """Analyse le projet pour déterminer les prérequis"""
+
         prerequisites = []

-        # Vérifier les différents fichiers de dépendances
+
         req_file = os.path.join(project_path, 'requirements.txt')
         if os.path.exists(req_file):
-            # Lire toutes les dépendances
+
             with open(req_file, 'r', encoding='utf-8') as f:
                 dependencies = [line.strip() for line in f if line.strip() and not line.startswith('#')]

-            # Ajouter Python avec une version spécifique si trouvée
-            python_version = "3.x"  # Version par défaut
+
+            python_version = "3.x"
             for dep in dependencies:
                 if dep.lower().startswith("python"):
                     python_version = dep.split("==")[-1] if "==" in dep else "3.x"
                     break
             prerequisites.append(f"- Python {python_version}")

-            # Grouper les dépendances par catégorie
+
             ui_deps = []
             parsing_deps = []
             diagram_deps = []
             other_deps = []

             for dep in dependencies:
-                # Extraire le nom et la version
+
                 if "==" in dep:
                     name, version = dep.split("==")
                     name = name.strip().lower()
@@ -221,7 +221,7 @@ class ProjectAnalyzer:
                     name = dep.strip().lower()
                     version = "(dernière version)"

-                # Classifier la dépendance
+
                 if name in ['customtkinter', 'tkinter', 'pillow']:
                     ui_deps.append(f"{dep} - {version}")
                 elif name in ['antlr4-python3-runtime', 'javalang', 'pyparsing']:
@@ -231,7 +231,7 @@ class ProjectAnalyzer:
                 else:
                     other_deps.append(f"{dep} - {version}")

-            # Ajouter les dépendances groupées
+
             if ui_deps:
                 prerequisites.append("\n### Interface graphique")
                 for dep in ui_deps:
@@ -252,11 +252,11 @@ class ProjectAnalyzer:
                 for dep in other_deps:
                     prerequisites.append(f"- {dep}")

-        # Vérifier Java pour PlantUML
+
         if os.path.exists(req_file) and any('plantuml' in line.lower() for line in open(req_file)):
             prerequisites.insert(1, "- Java Runtime Environment (JRE) - Requis pour PlantUML")

-        # Ajouter les outils de développement recommandés
+
         prerequisites.append("\n### Outils de développement recommandés")
         prerequisites.append("- Un IDE Python (PyCharm, VSCode, etc.)")
         prerequisites.append("- Git pour le contrôle de version")
@@ -264,20 +264,20 @@ class ProjectAnalyzer:
         return prerequisites

     def get_installation_steps(self, project_path):
-        """Génère les étapes d'installation en fonction du projet"""
+
         steps = []

-        # Étape 1 : Clonage du projet
+
         if os.path.exists(os.path.join(project_path, '.git')):
             try:
-                # Récupérer l'URL du dépôt distant
+
                 cmd = ['git', 'config', '--get', 'remote.origin.url']
                 process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                            cwd=project_path, text=True)
                 output, _ = process.communicate()
                 repo_url = output.strip() if output.strip() else '[URL_DU_PROJET]'

-                # Récupérer le nom du projet
+
                 project_name = os.path.basename(project_path)

                 steps.append(
@@ -292,7 +292,7 @@ class ProjectAnalyzer:
                 "**Télécharger le projet**\n   Téléchargez et décompressez le projet dans un dossier de votre choix"
             )

-        # Étape 2 : Installation des dépendances
+
         if os.path.exists(os.path.join(project_path, 'requirements.txt')):
             steps.append(
                 "**Installer les dépendances Python**\n   ```bash\n   pip install -r requirements.txt\n   ```"
@@ -318,7 +318,7 @@ class ProjectAnalyzer:
                 "**Restaurer et compiler le projet .NET**\n   ```bash\n   dotnet restore\n   dotnet build\n   ```"
             )

-        # Étape 3 : Lancement de l'application
+
         main_files = {
             'main.py': "**Lancer l'application**\n   ```bash\n   python main.py\n   ```",
             'app.py': "**Lancer l'application**\n   ```bash\n   python app.py\n   ```",
@@ -339,6 +339,6 @@ class ProjectAnalyzer:
             except json.JSONDecodeError:
                 pass

-        # Si aucune étape de lancement n'est détectée, ne pas ajouter d'étape par défaut
+

         return steps
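`get_git_authors` builds on `git shortlog -sne --all`, whose output pairs a commit count with `Name <email>` separated by a tab. A small parsing sketch on a canned sample (the sample lines are made up):

```python
# Parsing sketch for `git shortlog -sne --all` output (the sample lines are made up).
sample = "   42\tAda Lovelace <ada@example.org>\n    7\tAlan Turing <alan@example.org>\n"

for line in sample.strip().split('\n'):
    commits, author_info = line.strip().split('\t')
    name, email = author_info.split('<')
    print(commits.strip(), name.strip(), email.rstrip('>'))
# 42 Ada Lovelace ada@example.org
# 7 Alan Turing alan@example.org
```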
diff --git a/src/readme_generator.py b/src/readme_generator.py
index bea5fd0..c5c781e 100644
--- a/src/readme_generator.py
+++ b/src/readme_generator.py
@@ -29,13 +29,13 @@ Ce projet est distribué sous la licence {license}.
 '''

     def analyze_project(self, project_path):
-        """Analyse le projet pour détecter ses caractéristiques"""
+
         project_info = {}

-        # Détecter le nom du projet
+
         project_info['title'] = os.path.basename(project_path)

-        # Détecter la description à partir du README existant ou des commentaires dans les fichiers
+
         description = ""
         readme_files = ['README.md', 'README.txt', 'README']
         for readme in readme_files:
@@ -43,7 +43,7 @@ Ce projet est distribué sous la licence {license}.
             if os.path.exists(readme_path):
                 with open(readme_path, 'r', encoding='utf-8') as f:
                     content = f.read()
-                    # Chercher une description entre les balises ou après le titre
+
                     import re
                     desc_match = re.search(r'description[>\n]+(.*?)\n\n', content, re.I | re.S)
                     if desc_match:
@@ -51,7 +51,7 @@ Ce projet est distribué sous la licence {license}.
                 break

         if not description:
-            # Chercher dans les docstrings des fichiers Python
+
             for root, _, files in os.walk(project_path):
                 if '.git' in root or '__pycache__' in root:
                     continue
@@ -60,7 +60,7 @@ Ce projet est distribué sous la licence {license}.
                     try:
                         with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
                             content = f.read()
-                            # Chercher une docstring au début du fichier
+
                             doc_match = re.search(r'"""(.*?)"""', content, re.S)
                             if doc_match:
                                 description = doc_match.group(1).strip()
@@ -72,9 +72,9 @@ Ce projet est distribué sous la licence {license}.

         project_info['description'] = description if description else "Description à remplir"

-        # Détecter la licence
+
         license_files = ['LICENSE', 'LICENSE.txt', 'LICENSE.md']
-        license_type = "MIT"  # Licence par défaut
+        license_type = "MIT"
         for license_file in license_files:
             license_path = os.path.join(project_path, license_file)
             if os.path.exists(license_path):
@@ -94,23 +94,23 @@ Ce projet est distribué sous la licence {license}.
         return project_info

     def generate_readme_content(self, project_info):
-        """Génère le contenu du README sans l'écrire dans un fichier"""
-        # Formatage des prérequis (déjà formatés par l'analyseur)
+
+
         prerequisites = "\n".join(project_info.get('prerequisites', []))

-        # Formatage des étapes d'installation
+
         installation_steps = "\n\n".join(project_info.get('installation_steps', []))

-        # Formatage des technologies
+
         technologies = "Ce projet utilise les technologies suivantes :\n\n"
         technologies += "\n".join([f"* {tech}" for tech in project_info.get('technologies', [])])

-        # Formatage des auteurs avec leurs contributions
+
         authors = "\n\n".join([f"{author}" for author in project_info.get('authors', [])])
         if not authors:
             authors = "*Aucun contributeur listé*"

-        # Remplacement des placeholders
+
         return self.template.format(
             title=project_info.get('title', 'Projet Sans Titre'),
             description=project_info.get('description', ''),
@@ -122,18 +122,18 @@
         )

     def generate_readme(self, project_info, output_path):
-        """Génère le fichier README.md basé sur les informations du projet"""
-        # Analyser le projet pour compléter les informations manquantes
+
+
         project_analysis = self.analyze_project(output_path)

-        # Fusionner les informations fournies avec celles détectées
+
         merged_info = project_analysis.copy()
-        merged_info.update(project_info)  # Les infos fournies ont la priorité
+        merged_info.update(project_info)
+
-        # Générer le contenu
         content = self.generate_readme_content(merged_info)

-        # Sauvegarde du fichier README
+
         readme_file = os.path.join(output_path, "README.md")
         with open(readme_file, "w", encoding="utf-8") as f:
             f.write(content)
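The README is assembled by `str.format` over a single template string, with `dict.get` defaults for every placeholder. A reduced sketch with a two-placeholder template (the template text here is illustrative, not the one in the repository):

```python
# Reduced sketch of the template substitution used by ReadmeGenerator (illustrative template).
template = "# {title}\n\n{description}\n"

project_info = {'title': 'UML Generator', 'description': 'Génère des diagrammes UML.'}
readme = template.format(
    title=project_info.get('title', 'Projet Sans Titre'),
    description=project_info.get('description', ''),
)
print(readme)
```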
diff --git a/src/uml_generator.py b/src/uml_generator.py
index b8a01b6..e94616a 100644
--- a/src/uml_generator.py
+++ b/src/uml_generator.py
@@ -1,11 +1,18 @@
 import os
 from plantuml import PlantUML
+from plantuml import PlantUMLHTTPError

 class UMLGenerator:
     def __init__(self):
         self.url = "http://www.plantuml.com/plantuml/img/"
         self.plantuml = PlantUML(url=self.url)
-
+
+    def log_message(self, message):
+
+        print(message)
+        if "ERREUR" in message:
+            print("=== Génération interrompue ===")
+
     def generate_class_diagram(self, classes, output_path):
         uml_content = "@startuml\n\n"

@@ -18,7 +25,7 @@ class UMLGenerator:
         uml_content += "skinparam classArrowColor #800000\n"
         uml_content += "skinparam classFontColor black\n"
         uml_content += "skinparam classFontName Tahoma\n\n"
-
+
         for class_info in classes:
             uml_content += f"class {class_info['name']} {{\n"

@@ -33,7 +40,7 @@ class UMLGenerator:
                 uml_content += f"  +{method['name']}({params}): {method['return_type']}\n"

             uml_content += "}\n\n"
-
+
             if class_info['extends']:
                 uml_content += f"{class_info['extends']} <|-- {class_info['name']}\n"

@@ -41,23 +48,54 @@ class UMLGenerator:
             for interface in class_info['implements']:
                 uml_content += f"{interface} <|.. {class_info['name']}\n"
+
+
+            if 'associations' in class_info:
+                for assoc_class in class_info['associations']:
+                    uml_content += f"{class_info['name']} --> {assoc_class}\n"
+
+
+            if 'dependencies' in class_info:
+                for dep_class in class_info['dependencies']:
+                    uml_content += f"{class_info['name']} ..> {dep_class}\n"
+
+
+            if 'aggregations' in class_info:
+                for agg_class in class_info['aggregations']:
+                    uml_content += f"{class_info['name']} o--> {agg_class}\n"
+
+
+            if 'compositions' in class_info:
+                for comp_class in class_info['compositions']:
+                    uml_content += f"{class_info['name']} *--> {comp_class}\n"

         uml_content += "@enduml"

-        uml_file = os.path.join(output_path, "class_diagram.puml")
-        with open(uml_file, "w", encoding="utf-8") as f:
+        uml_temp_file = os.path.join(output_path, "temp_class_diagram.puml")
+        with open(uml_temp_file, "w", encoding="utf-8") as f:
             f.write(uml_content)

-        png_file = os.path.join(output_path, "class_diagram.png")
-        self.plantuml.processes_file(uml_file, outfile=png_file)
+        png_temp_file = os.path.join(output_path, "temp_class_diagram.png")
+        try:
+            self.plantuml.processes_file(uml_temp_file, outfile=png_temp_file)
+        except PlantUMLHTTPError as e:
+            error_msg = str(e)
+            self.log_message(f"❌ ERREUR: {error_msg}")
+        except Exception as e:
+            self.log_message(f"❌ ERREUR inattendue: {str(e)}")
+
+
-        return png_file
+        png_file = os.path.join(output_path, "class_diagram.png")
+        self.plantuml.processes_file(uml_temp_file, outfile=png_file)
+        return png_file

     def generate_documentation(self, classes, output_path):
         doc_content = "# Documentation du Projet\n\n"
+
         for class_info in classes:
             doc_content += f"## Classe {class_info['name']}\n\n"
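Taken together, the analyzers emit PlantUML arrow lines (`<|--` inheritance, `..>` dependency, `-->` association, `o-->`/`*-->` aggregation/composition) and `UMLGenerator` ships the text to the public PlantUML server. A condensed, hedged sketch of that round trip using the same `plantuml` package calls as above (the class names and file paths are invented for the example):

```python
# Condensed sketch of the PlantUML round trip used above (sample classes and paths are invented).
import os
from plantuml import PlantUML, PlantUMLHTTPError

uml_content = "@startuml\n"
uml_content += "class BaseEntity {}\nclass Order {}\nclass Customer {}\nclass Mailer {}\n"
uml_content += "BaseEntity <|-- Order\n"  # inheritance
uml_content += "Order --> Customer\n"     # association (typed attribute)
uml_content += "Order ..> Mailer\n"       # dependency (used inside a method)
uml_content += "@enduml\n"

puml_file = os.path.join(".", "sample_diagram.puml")
with open(puml_file, "w", encoding="utf-8") as f:
    f.write(uml_content)

server = PlantUML(url="http://www.plantuml.com/plantuml/img/")
try:
    server.processes_file(puml_file, outfile="sample_diagram.png")
except PlantUMLHTTPError as e:
    # Mirrors the logging convention of UMLGenerator.log_message.
    print(f"❌ ERREUR: {e}")
```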