# Collection of deep-learning demo scripts (Reuters, LeNet/MNIST, autoencoder,
# sklearn MLP, IMDB, LSTM forecasting, California housing).
# --- Reuters newswire topic classification (Keras) ---
import tensorflow as tf
from tensorflow.keras.datasets import reuters
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# Load the Reuters newswire dataset, keeping only the 10,000 most frequent words;
# each sample is a list of word indices.
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(
num_words=10000)
# word -> integer index mapping as assigned by the tokenizer.
word_index = reuters.get_word_index()
# Invert the mapping so integer indices can be decoded back to words.
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()])
# Decode the first training newswire for inspection. Indices are offset by 3
# because 0, 1, 2 are reserved (padding / start-of-sequence / unknown);
# unmapped indices render as "?".
decoded_newswire = " ".join(
[reverse_word_index.get(i - 3, "?") for i in train_data[0]])
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer index sequences.

    Parameters
    ----------
    sequences : iterable of iterables of int
        Each inner sequence holds indices in ``[0, dimension)``.
    dimension : int
        Width of each output vector (vocabulary size).

    Returns
    -------
    numpy.ndarray of shape ``(len(sequences), dimension)``
        ``results[i, j] == 1.0`` iff index ``j`` occurs in ``sequences[i]``.
    """
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        # Fancy indexing sets every listed position in one C-level call
        # (replaces the original inner Python loop); duplicates are harmless.
        results[i, list(sequence)] = 1.
    return results
# Multi-hot encode the variable-length word-index sequences.
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)

# One-hot encode the 46 topic labels (OneHotEncoder expects a 2-D column).
train_labels_2 = np.array(train_labels).reshape(-1, 1)
test_labels_2 = np.array(test_labels).reshape(-1, 1)
encoder = OneHotEncoder(sparse_output=False)
Y_train = encoder.fit_transform(train_labels_2)
# BUG FIX: the test labels must be encoded with the encoder fitted on the
# TRAINING labels (transform, not fit_transform). Refitting on the test set
# could yield a different column order/width if a class is absent there.
Y_test = encoder.transform(test_labels_2)

# Small MLP ending in a 46-way softmax (one unit per topic).
model = Sequential([
    Dense(64, activation="relu"),
    Dense(64, activation="relu"),
    Dense(46, activation="softmax"),
])
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(x_train, Y_train, epochs=20, batch_size=1024)

loss, accu = model.evaluate(x_test, Y_test)
print("loss of the model is", loss)
print(" accuracy of the model is", accu)
# --- LeNet-style CNN on MNIST (Keras) ---
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, AveragePooling2D, Flatten, Dense, InputLayer
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
import numpy as np
# Load MNIST digits and zero-pad the 28x28 images to 32x32 (LeNet input size).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
pad_spec = ((0, 0), (2, 2), (2, 2))
x_train = np.pad(x_train, pad_spec, mode='constant')
x_test = np.pad(x_test, pad_spec, mode='constant')

# Scale pixels to [0, 1] and append a trailing channel axis -> (n, 32, 32, 1).
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)

# One-hot encode the 10 digit classes.
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)

# LeNet-style stack: two conv/average-pool stages, then three dense layers.
model = Sequential()
model.add(InputLayer(input_shape=(32, 32, 1)))
model.add(Conv2D(6, kernel_size=5, activation='relu', padding='same'))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, kernel_size=5, activation='relu'))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(120, activation='relu'))
model.add(Dense(84, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=64, epochs=10, validation_split=0.1)

loss, acc = model.evaluate(x_test, y_test)
print(f"Test Accuracy: {acc:.4f}")
# --- Dense autoencoder on MNIST (Keras) ---
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt
# 1. Load and preprocess the MNIST dataset (labels are not needed).
# BUG FIX: the section labels below were bare prose (SyntaxError) and the
# plotting loop had lost its indentation; both are restored here.
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train[..., tf.newaxis]  # add channel dimension -> (n, 28, 28, 1)
x_test = x_test[..., tf.newaxis]

# 2. Define the autoencoder: 784 -> 128 -> 64 -> 128 -> 784.
input_img = tf.keras.Input(shape=(28, 28, 1))
# Encoder: flatten the image and compress it down to a 64-dim code.
x = layers.Flatten()(input_img)
x = layers.Dense(128, activation='relu')(x)
encoded = layers.Dense(64, activation='relu')(x)
# Decoder: expand the code back to pixel space; sigmoid keeps outputs in [0, 1].
x = layers.Dense(128, activation='relu')(encoded)
x = layers.Dense(28 * 28, activation='sigmoid')(x)
decoded = layers.Reshape((28, 28, 1))(x)

# Full autoencoder model trained end-to-end with a pixel-wise MSE loss.
autoencoder = tf.keras.Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mse')

# 3. Train the model to reconstruct its own input.
autoencoder.fit(x_train, x_train,
                epochs=5,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))

# 4. Visualize original vs reconstructed images (first 10 test digits).
decoded_imgs = autoencoder.predict(x_test[:10])
plt.figure(figsize=(10, 4))
for i in range(10):
    # Original
    plt.subplot(2, 10, i + 1)
    plt.imshow(x_test[i].squeeze(), cmap='gray')
    plt.axis('off')
    # Reconstructed
    plt.subplot(2, 10, i + 11)
    plt.imshow(decoded_imgs[i].squeeze(), cmap='gray')
    plt.axis('off')
plt.tight_layout()
plt.show()
# ======================================================================
# --- MLP classifier on MNIST (scikit-learn) ---
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler

# Fetch the 70k-sample MNIST dataset (784 pixel features per image)
# and scale pixels to [0, 1].
X, Y = fetch_openml('mnist_784', version=1, return_X_y=True)
X = X.astype(np.float32) / 255.0

# Hold out 25% of the samples for evaluation.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.25, random_state=42)

# Standardize features: the scaler is fitted on the training split only and
# then reused unchanged on the test split.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Two-hidden-layer MLP (100 then 256 units) trained with Adam.
classifier = MLPClassifier(
    hidden_layer_sizes=(100, 256),
    activation='relu',
    solver='adam',
    random_state=42,
    batch_size='auto',
    alpha=0.0001,
)
classifier.fit(X_train, Y_train)

# Evaluate on the held-out split.
Y_pred = classifier.predict(X_test)
accuracy = accuracy_score(Y_pred, Y_test)
print(accuracy)
# --- IMDB sentiment classification (Keras) ---
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding,Flatten, Dense
# Load IMDB reviews restricted to the 10,000 most common words, then
# pad/truncate every review to exactly 500 tokens.
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=10000)
X_train = pad_sequences(X_train, maxlen=500)
X_test = pad_sequences(X_test, maxlen=500)

# Embed each word in 32 dimensions, flatten, and classify with a small MLP;
# the single sigmoid unit outputs P(positive review).
model = Sequential()
model.add(Embedding(input_dim=10000, output_dim=32, input_length=500))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=8, batch_size=64, validation_split=0.2)

loss, accuracy = model.evaluate(X_test, y_test)
print("Test Accuracy:", accuracy)
# --- LSTM time-series forecasting on a synthetic sine wave (Keras) ---
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler
def generate_series(n_points=500):
    """Return a noisy sine wave of length *n_points*.

    The signal is ``sin(0.1 * t)`` for ``t = 0 .. n_points - 1`` plus
    Gaussian noise with standard deviation 0.1 (not seeded, so each call
    produces a different series).
    """
    x = np.arange(n_points)
    series = np.sin(0.1 * x) + np.random.normal(scale=0.1, size=n_points)
    return series
# Generate the synthetic series and scale it into [0, 1]; MinMaxScaler
# expects 2-D input, hence the reshape to a single column.
series = generate_series()
scaler = MinMaxScaler()
series_scaled = scaler.fit_transform(series.reshape(-1, 1))
def create_sequences(data, seq_length):
    """Slice *data* into overlapping windows for supervised forecasting.

    Parameters
    ----------
    data : indexable sequence (e.g. ``np.ndarray`` of shape ``(n, 1)``)
        Ordered observations.
    seq_length : int
        Number of past steps used to predict the next one.

    Returns
    -------
    (X, y) : pair of ``np.ndarray``
        ``X[i]`` holds ``data[i : i + seq_length]`` and ``y[i]`` is
        ``data[i + seq_length]``, so ``len(X) == len(data) - seq_length``.
    """
    X, y = [], []
    for i in range(len(data) - seq_length):
        X.append(data[i:i + seq_length])
        y.append(data[i + seq_length])
    return np.array(X), np.array(y)
# Windowing: 20 past steps predict the next value.
seq_length = 20
X, y = create_sequences(series_scaled, seq_length)

# Chronological 80/20 train/test split (no shuffling for time series).
split = int(len(X) * 0.8)
X_train, y_train = X[:split], y[:split]
X_test, y_test = X[split:], y[split:]

# One LSTM layer feeding a small dense head that regresses a single value.
model = Sequential()
model.add(LSTM(64, input_shape=(seq_length, 1), return_sequences=False))
model.add(Dense(32, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X_train, y_train, epochs=20, batch_size=32, validation_split=0.1)

# Undo the MinMax scaling so values are plotted in the original units.
y_pred = model.predict(X_test)
y_pred_inverse = scaler.inverse_transform(y_pred)
y_test_inverse = scaler.inverse_transform(y_test)

# Plot true vs predicted values over the test horizon.
plt.figure(figsize=(12, 6))
plt.plot(range(len(y_test_inverse)), y_test_inverse, label='True')
plt.plot(range(len(y_pred_inverse)), y_pred_inverse, label='Predicted')
plt.title("LSTM Forecast")
plt.xlabel("Time Step")
plt.ylabel("Value")
plt.legend()
plt.show()
# --- California housing regression (Keras) ---
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import numpy as np
# Load the California housing regression dataset (8 features, median value target).
data = fetch_california_housing()
X = data.data
Y = data.target

# Standardize the features. NOTE(review): the scaler is fitted on the FULL
# dataset before splitting (kept from the original); fitting on the train
# split only would avoid test-set leakage.
scale = StandardScaler()
X_Scale = scale.fit_transform(X)
X_train, X_test, Y_train, Y_test = train_test_split(
    X_Scale, Y, test_size=0.3, random_state=42)

# Three-hidden-layer MLP regressor with a single linear output unit.
model = Sequential([
    Dense(128, activation='relu', input_shape=(X_train.shape[1],)),
    Dense(64, activation='relu'),
    Dense(32, activation='relu'),
    Dense(1),
])
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.fit(X_train, Y_train, epochs=60, batch_size=64, validation_split=0.2, verbose=1)

# BUG FIX: removed the no-op expression statement `data.feature_names`
# (its value was computed and immediately discarded).
loss, mae = model.evaluate(X_test, Y_test)
print("Mean Absolute Error", mae)