🤖
TPs Intelligence Artificielle
3 Projets : Régression, Classification, Réseau de Neurones
🐍 Python
📊 Scikit-learn
🧠 TensorFlow
TP1 : Régression Linéaire
Prédiction de valeurs continues
🎯 Objectifs
- Comprendre le principe de la régression linéaire
- Préparer et normaliser les données
- Entraîner un modèle avec scikit-learn
- Évaluer les performances (MSE, R²)
Code : Régression température → consommation
# TP1: Linear regression — predict electricity consumption from temperature.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# Simulated data: consumption = base + temperature effect + Gaussian noise.
np.random.seed(42)
temperature = np.random.uniform(0, 40, 100)  # 0-40 °C
consommation = 50 + 2 * temperature + np.random.normal(0, 5, 100)

# sklearn expects a 2-D feature matrix.
X = temperature.reshape(-1, 1)
y = consommation

# 80%/20% train/test split.
# FIX: random_state pins the split so the printed metrics are reproducible
# (np.random.seed alone does NOT seed train_test_split's shuffling).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)

# Create and fit the model.
model = LinearRegression()
model.fit(X_train, y_train)

# Predictions on the held-out set.
y_pred = model.predict(X_test)

# Metrics.
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"Coefficient (pente): {model.coef_[0]:.2f}")
print(f"Intercept: {model.intercept_:.2f}")
print(f"MSE: {mse:.2f}")
print(f"R²: {r2:.2f}")

# Visualisation.
# FIX: sort by temperature before drawing the regression line — plt.plot
# connects points in array order, and the unsorted test points would be
# rendered as a zigzag instead of a straight fitted line.
order = np.argsort(X_test[:, 0])
plt.figure(figsize=(10, 6))
plt.scatter(X_test, y_test, color='blue', label='Donnees reelles')
plt.plot(X_test[order], y_pred[order], color='red', linewidth=2, label='Regression')
plt.xlabel('Temperature (°C)')
plt.ylabel('Consommation (kWh)')
plt.title('Regression Lineaire: Temperature → Consommation')
plt.legend()
plt.grid(True)
plt.savefig('regression.png')
plt.show()
TP2 : Classification KNN
Classification de capteurs défectueux
Code : Classification de défauts capteurs
# TP2: KNN classification — classify sensors as Normal / Defaut / Hors-service.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np

# Features per sensor: [variance, mean deviation, abnormal peak count]
# Classes: 0=Normal, 1=Defaut, 2=Hors-service
X = np.array([
    [0.5, 0.2, 1], [0.6, 0.3, 2], [0.4, 0.1, 0],      # Normal
    [2.5, 1.5, 8], [3.0, 2.0, 10], [2.8, 1.8, 7],     # Defaut
    [8.0, 5.0, 25], [10.0, 6.0, 30], [9.5, 5.5, 28],  # Hors-service
])
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])

# Standardize: KNN is distance-based, so the peak-count feature (0-30)
# would otherwise dominate variance (0-10) and mean deviation (0-6).
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# KNN model (k=3 neighbors).
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_scaled, y)

labels = ['Normal', 'Defaut', 'Hors-service']

# FIX: confusion_matrix and classification_report were imported but never
# used — report model quality on the training set (the demo dataset is too
# small for a held-out split).
y_train_pred = knn.predict(X_scaled)
print("Matrice de confusion:")
print(confusion_matrix(y, y_train_pred))
print(classification_report(y, y_train_pred, target_names=labels))

# Classify a new sensor — scaled with the SAME fitted scaler, never a new fit.
nouveau_capteur = np.array([[2.0, 1.2, 6]])
nouveau_scaled = scaler.transform(nouveau_capteur)
prediction = knn.predict(nouveau_scaled)
print(f"Prediction: {labels[prediction[0]]}")

# Neighbor-vote probabilities for the new sensor.
proba = knn.predict_proba(nouveau_scaled)
print(f"Probabilites: {dict(zip(labels, proba[0]))}")
TP3 : Réseau de Neurones (MLP)
Classification multi-classes avec TensorFlow/Keras
Code : Réseau de neurones pour classification
# TP3: MLP neural network — 3-class classification of synthetic sensor signals.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn.preprocessing import StandardScaler
import numpy as np

# FIX: seed both NumPy (data generation) AND TensorFlow (weight init,
# dropout, shuffling) — np.random.seed alone does not make training
# reproducible.
np.random.seed(42)
tf.random.set_seed(42)

n_samples = 300

# Three Gaussian clusters in 2-D, one per class.
X = np.vstack([
    np.random.normal([0, 0], 1, (n_samples, 2)),   # class 0
    np.random.normal([3, 3], 1, (n_samples, 2)),   # class 1
    np.random.normal([-3, 3], 1, (n_samples, 2)),  # class 2
])
y = np.array([0] * n_samples + [1] * n_samples + [2] * n_samples)

# Standardize features.
scaler = StandardScaler()
X = scaler.fit_transform(X)

# One-hot targets, required by categorical cross-entropy.
y_onehot = tf.keras.utils.to_categorical(y, 3)

# Network architecture: 2 -> 16 -> 8 -> 3 (softmax over the 3 classes).
model = Sequential([
    Dense(16, activation='relu', input_shape=(2,)),
    Dropout(0.2),  # light regularization
    Dense(8, activation='relu'),
    Dense(3, activation='softmax'),  # 3 classes
])

model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)

# Training; validation_split holds out the last 20% for validation curves.
history = model.fit(X, y_onehot, epochs=50, validation_split=0.2, verbose=1)

# Evaluation.
# NOTE(review): this evaluates on the full dataset, which includes the
# training data, so the printed accuracy is optimistic.
loss, accuracy = model.evaluate(X, y_onehot)
print(f"\nPrecision: {accuracy*100:.1f}%")

# Predict a new point — scaled with the SAME fitted scaler.
nouveau = scaler.transform([[1, 1]])
pred = model.predict(nouveau)
print(f"Prediction: Classe {np.argmax(pred)}")
Critères d'évaluation
| Critere | Points |
|---|---|
| TP1 : Régression linéaire + visualisation | /6 |
| TP2 : Classification KNN + métriques | /6 |
| TP3 : Réseau de neurones fonctionnel | /8 |
| Total | /20 |