The document walks through analyzing Amazon sales data with machine learning: loading, cleaning, and visualizing the data, then training and evaluating models including Linear Regression, Random Forest, and several classifiers. Results are saved to a report, and the best-performing model is selected by accuracy. A set of constraint-satisfaction (CSP) examples solved with OR-Tools follows.


-------------------supervised1------------------------

# Amazon Sale Report - AI Project Code


import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import train_test_split


from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error

# ============================
# Part 1: Load and Clean Data
# ============================
# Load the dataset
df = pd.read_csv("amazon.csv")

# Display first few rows and column names to check if 'amount' exists
print(df.head())
print("\nColumns in the dataset:", df.columns)

# Ensure the 'amount' column exists before proceeding
if 'amount' not in df.columns:
    raise KeyError("'amount' column not found in the dataset. Please check your data.")

# Drop columns with high missing values or irrelevant ones
df.drop(['customer_id', 'product_id'], axis=1, inplace=True, errors='ignore')

# Handle missing values
df.dropna(inplace=True)

# Encode categorical variables if any
df = pd.get_dummies(df, drop_first=True)

# =============================
# Part 2: Data Visualization
# =============================
plt.figure(figsize=(8, 5))
sns.histplot(df['amount'], bins=30, kde=True)
plt.title("Distribution of Sales Amount")
plt.xlabel("Amount")
plt.ylabel("Frequency")
plt.show()

# Check if 'month' column exists before plotting
if 'month' in df.columns:
    plt.figure(figsize=(10, 5))
    sns.boxplot(x='month', y='amount', data=df)
    plt.title("Monthly Sales Distribution")
    plt.show()

# Check if 'category' column exists before plotting
if 'category' in df.columns:
    plt.figure(figsize=(12, 5))
    df.groupby('category')['amount'].sum().plot(kind='bar')
    plt.title("Sales by Product Category")
    plt.ylabel("Total Sales Amount")
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()

# =============================
# Part 3: Modeling & Evaluation
# =============================

# Features and target
X = df.drop('amount', axis=1)
y = df['amount']

# Train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

# Model 1: Linear Regression
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred_lr = lr.predict(X_test)

# Model 2: Random Forest
rf = RandomForestRegressor(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
y_pred_rf = rf.predict(X_test)

# Evaluation function
def evaluate_model(name, y_true, y_pred):
    r2 = r2_score(y_true, y_pred)
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    return [name, round(r2, 4), round(rmse, 4)]

# Collect results
results = []
results.append(evaluate_model("Linear Regression", y_test, y_pred_lr))
results.append(evaluate_model("Random Forest", y_test, y_pred_rf))

# Convert to DataFrame
results_df = pd.DataFrame(results, columns=["Model", "R2 Score", "RMSE"])
print("\nModel Evaluation Results:")
print(results_df)

# =============================
# Part 4: Save Report
# =============================
results_df.to_csv("model_evaluation_report.csv", index=False)
print("\nReport saved as model_evaluation_report.csv")

---------------------supervised2--------------------------
# Import necessary libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

# Models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

# Part 1: Data Pre-processing

# 1. Import your dataset

# Replace 'Report.csv' with your actual file name/path
try:
    df = pd.read_csv('Report.csv')  # or pd.read_excel() for Excel files
    print("Data loaded successfully!")
    print(f"Dataset shape: {df.shape}")
    print("\nFirst 5 rows:")
    print(df.head())
except Exception as e:
    print(f"Error loading file: {e}")
    exit()

# 2. Check for missing values and clean data
print("\nMissing values before cleaning:")
print(df.isnull().sum())

# Handle missing values - adjust based on your data

# For numerical columns
num_cols = df.select_dtypes(include=['int64', 'float64']).columns.tolist()
if 'EZE' in num_cols:
    num_cols.remove('EZE')  # Remove target variable if it's numeric

# For categorical columns
cat_cols = df.select_dtypes(include=['object', 'category']).columns.tolist()

# Create preprocessing pipelines
num_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='mean')),
    ('scaler', StandardScaler())
])

cat_pipeline = Pipeline([
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])

# Combine pipelines
preprocessor = ColumnTransformer([
    ('num', num_pipeline, num_cols),
    ('cat', cat_pipeline, cat_cols)
])

# 3. Separate features and target

# Make sure 'EZE' is your target column name - change if different
if 'EZE' not in df.columns:
    print("\nError: 'EZE' column not found. Please identify your target column.")
    exit()

X = df.drop('EZE', axis=1)
y = df['EZE']

# Convert target to binary if needed (B2B=1, B2C=0)
if y.dtype == 'object':
    y = y.map({'B2B': 1, 'B2C': 0})

# Part 2: Data Splitting and Model Training

# 1. Split data
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

print(f"\nTraining set size: {X_train.shape[0]}")
print(f"Test set size: {X_test.shape[0]}")
print("Class distribution in training set:")
print(y_train.value_counts(normalize=True))

# 2. Create and train models
models = {
    'Logistic Regression': LogisticRegression(max_iter=1000, class_weight='balanced'),
    'Decision Tree': DecisionTreeClassifier(max_depth=5, random_state=42),
    'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42)
}

# Create a pipeline for each model
trained_models = {}
for name, model in models.items():
    pipeline = Pipeline([
        ('preprocessor', preprocessor),
        ('classifier', model)
    ])
    pipeline.fit(X_train, y_train)
    trained_models[name] = pipeline
    print(f"\n{name} trained successfully!")

# 3. Tweak parameters and train again
tweaked_models = {
    'Logistic Regression (tweaked)': LogisticRegression(
        max_iter=2000, C=0.1, class_weight='balanced', random_state=42
    ),
    'Decision Tree (tweaked)': DecisionTreeClassifier(
        max_depth=7, min_samples_split=10, random_state=42
    ),
    'Random Forest (tweaked)': RandomForestClassifier(
        n_estimators=200, max_depth=10, random_state=42
    )
}

for name, model in tweaked_models.items():
    pipeline = Pipeline([
        ('preprocessor', preprocessor),
        ('classifier', model)
    ])
    pipeline.fit(X_train, y_train)
    trained_models[name] = pipeline
    print(f"\n{name} trained successfully!")

# Part 3: Model Evaluation

# 1. Evaluate on training data
print("\nTraining set performance:")
for name, model in trained_models.items():
    y_train_pred = model.predict(X_train)
    acc = accuracy_score(y_train, y_train_pred)
    print(f"{name} - Training Accuracy: {acc:.4f}")

# 2. Evaluate on test data
print("\nTest set performance:")
for name, model in trained_models.items():
    y_pred = model.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    print(f"\n{name}:")
    print(f"Accuracy: {acc:.4f}")
    print("Confusion Matrix:")
    print(confusion_matrix(y_test, y_pred))
    print("Classification Report:")
    print(classification_report(y_test, y_pred))

# 3. Cross-validation
# (each fold refits the full pipeline, so preprocessing is fit on the
# training folds only - no separate pre-transform of X is needed)
print("\nCross-validation results (5-fold):")
for name, model in models.items():
    cv_scores = cross_val_score(
        Pipeline([
            ('preprocessor', preprocessor),
            ('classifier', model)
        ]),
        X, y, cv=5, scoring='accuracy'
    )
    print(f"{name} - Mean CV Accuracy: {cv_scores.mean():.4f} (±{cv_scores.std():.4f})")

# Save the best model if needed
best_model_name = max(
    trained_models,
    key=lambda k: accuracy_score(y_test, trained_models[k].predict(X_test))
)
best_model = trained_models[best_model_name]
print(f"\nBest model: {best_model_name}")

# To save the model:
# import joblib
# joblib.dump(best_model, 'best_amazon_sales_model.pkl')
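# To load it back later (a minimal sketch, assuming the dump above was run):
# loaded = joblib.load('best_amazon_sales_model.pkl')
# print(loaded.predict(X_test)[:5])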

-------------------------supervised2.1-----------------------------
# Import necessary libraries
import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier

# 1. Load dataset
df = pd.read_csv('Report.csv')
print("First 5 rows of data:")
print(df.head())

# 2. Prepare target variable (convert B2B/B2C to 1/0)
df['EZE'] = df['EZE'].map({'B2B': 1, 'B2C': 0})
y = df['EZE']
X = df.drop('EZE', axis=1)

# 3. Handle missing values

# Numerical columns
num_cols = X.select_dtypes(include=['int64', 'float64']).columns
for col in num_cols:
    X[col] = X[col].fillna(X[col].mean())

# Categorical columns
cat_cols = X.select_dtypes(include=['object']).columns
for col in cat_cols:
    X[col] = X[col].fillna(X[col].mode()[0])

# 4. Encode categorical variables
X_encoded = pd.get_dummies(X, columns=cat_cols)

# 5. Scale numerical features
scaler = StandardScaler()
X_encoded[num_cols] = scaler.fit_transform(X_encoded[num_cols])

# 6. Split data
X_train, X_test, y_train, y_test = train_test_split(X_encoded, y, test_size=0.2,
                                                    random_state=42)

# 7. Define and train models
models = {
    'Logistic Regression': LogisticRegression(max_iter=1000),
    'Decision Tree': DecisionTreeClassifier(max_depth=5),
    'Random Forest': RandomForestClassifier(n_estimators=100)
}

for name, model in models.items():
    # Train model
    model.fit(X_train, y_train)

    # Training evaluation
    train_pred = model.predict(X_train)
    print(f"\n{name} - Training Accuracy: {accuracy_score(y_train, train_pred):.4f}")

    # Test evaluation
    test_pred = model.predict(X_test)
    print(f"{name} - Test Accuracy: {accuracy_score(y_test, test_pred):.4f}")
    print("Confusion Matrix:")
    print(confusion_matrix(y_test, test_pred))
    print("Classification Report:")
    print(classification_report(y_test, test_pred))

    # Cross-validation
    cv_scores = cross_val_score(model, X_encoded, y, cv=5)
    print(f"5-Fold CV Accuracy: {cv_scores.mean():.4f} (±{cv_scores.std():.4f})")

# 8. Find best model
best_model_name = max(
    models,
    key=lambda k: accuracy_score(y_test, models[k].predict(X_test))
)
print(f"\nBest performing model: {best_model_name}")

-------------------supervised3-------------------------
# Import necessary libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score

# For models
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier

## Part 1: Data Pre-processing

# 1. Import dataset (I'll use the built-in iris dataset as an example)
from sklearn.datasets import load_iris
data = load_iris()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target

# Let's artificially modify the dataset to include missing values and
# categorical variables for demonstration purposes
df.iloc[2:5, 0] = np.nan    # Add missing values to first column
df.iloc[10:15, 2] = np.nan  # Add missing values to third column
df['category'] = np.random.choice(['A', 'B', 'C'], size=len(df))  # Add categorical column

print("Original dataset:")
print(df.head())
print("\nMissing values per column:")
print(df.isnull().sum())

# 2. Handle missing values

# For numerical columns, fill with mean
num_cols = df.select_dtypes(include=['float64', 'int64']).columns
for col in num_cols:
    if df[col].isnull().sum() > 0:
        imputer = SimpleImputer(strategy='mean')
        df[col] = imputer.fit_transform(df[[col]])

# 3. Handle categorical variables

# Identify categorical columns
cat_cols = df.select_dtypes(include=['object']).columns

# Use one-hot encoding for categorical variables
df = pd.get_dummies(df, columns=cat_cols)
print("\nAfter handling missing values and categorical variables:")
print(df.head())

# 4. Standardize numerical features (excluding target)
scaler = StandardScaler()
features_to_scale = [col for col in df.columns
                     if col != 'target' and 'category_' not in col]
df[features_to_scale] = scaler.fit_transform(df[features_to_scale])

print("\nAfter standardization:")
print(df.head())

# 5. Segregate features and target
X = df.drop('target', axis=1)  # Features
y = df['target']               # Target variable

## Part 2: Data Splitting and Model Training

# 1. Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42)

print(f"\nTraining set size: {X_train.shape[0]} samples")


print(f"Testing set size: {X_test.shape[0]} samples")

# 2. Train three different models


# Model 1: Logistic Regression
log_reg = LogisticRegression(max_iter=200)
log_reg.fit(X_train, y_train)

# Model 2: Decision Tree


tree = DecisionTreeClassifier(max_depth=3)
tree.fit(X_train, y_train)

# Model 3: K-Nearest Neighbors


knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)

# 3. Tweak parameters and train again

# Let's try different parameters for each model

# Logistic Regression with different parameters
log_reg_tweaked = LogisticRegression(max_iter=500, C=0.1)
log_reg_tweaked.fit(X_train, y_train)

# Decision Tree with different parameters
tree_tweaked = DecisionTreeClassifier(max_depth=5, min_samples_split=5)
tree_tweaked.fit(X_train, y_train)

# KNN with different parameters
knn_tweaked = KNeighborsClassifier(n_neighbors=5, weights='distance')
knn_tweaked.fit(X_train, y_train)

## Part 3: Model Evaluation

# 1. Evaluate models on training data
print("\nTraining set performance:")
models = {
    "Logistic Regression": log_reg,
    "Decision Tree": tree,
    "KNN": knn,
    "Logistic Regression (tweaked)": log_reg_tweaked,
    "Decision Tree (tweaked)": tree_tweaked,
    "KNN (tweaked)": knn_tweaked
}

for name, model in models.items():
    y_train_pred = model.predict(X_train)
    acc = accuracy_score(y_train, y_train_pred)
    print(f"{name} - Training Accuracy: {acc:.4f}")

# 2. Evaluate on testing set
print("\nTesting set performance:")
for name, model in models.items():
    y_pred = model.predict(X_test)
    acc = accuracy_score(y_test, y_pred)
    print(f"\n{name}:")
    print(f"Accuracy: {acc:.4f}")
    print("Confusion Matrix:")
    print(confusion_matrix(y_test, y_pred))
    print("Classification Report:")
    print(classification_report(y_test, y_pred))

# 3. Perform k-fold cross-validation (using k=5)
print("\nCross-validation results (5-fold):")
for name, model in models.items():
    cv_scores = cross_val_score(model, X, y, cv=5)
    print(f"{name} - Mean CV Accuracy: {cv_scores.mean():.4f} (±{cv_scores.std():.4f})")

---bins csp---------

# Step 1: Import OR-Tools
from ortools.sat.python import cp_model

# Step 2: Create a model
model = cp_model.CpModel()

# Step 3: Define data
weights = [4, 8, 1, 4, 2, 6, 3, 5]
num_items = len(weights)
num_bins = 3
capacity = 12

# Step 4: Create decision variables

# bins[i] = bin number (0, 1, or 2) where item i is placed
bins = [model.NewIntVar(0, num_bins - 1, f'item_{i}_bin') for i in range(num_items)]

# Step 5: Constraints

# Each item must go in exactly one bin -> already ensured by variable domain (0-2)

# Items 0 and 1 must be in different bins
model.Add(bins[0] != bins[1])

# Items 2 and 3 must be in the same bin
model.Add(bins[2] == bins[3])

# Items 4 and 5 must be in different bins
model.Add(bins[4] != bins[5])

# Items 6 and 7 must be in the same bin
model.Add(bins[6] == bins[7])

# Items 6 and 7 must not share a bin with item 1
model.Add(bins[6] != bins[1])

# Item 5 is not allowed in bin 0
model.Add(bins[5] != 0)

# Step 6: Bin weight constraints

# For each bin, create indicator booleans linking items to the bin, then
# cap the bin's total weight at the capacity
for b in range(num_bins):
    bin_items = []
    for i in range(num_items):
        in_bin = model.NewBoolVar(f'in_bin{b}_item{i}')
        model.Add(bins[i] == b).OnlyEnforceIf(in_bin)
        model.Add(bins[i] != b).OnlyEnforceIf(in_bin.Not())
        bin_items.append(in_bin)
    # Add capacity constraint
    model.Add(sum(bin_items[i] * weights[i] for i in range(num_items)) <= capacity)

# Step 7: Create solver and solve
solver = cp_model.CpSolver()
status = solver.Solve(model)

# Step 8: Print solution
if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
    print("Solution Found:\n")
    for i in range(num_items):
        print(f"Item {i} (weight {weights[i]}) → Bin {solver.Value(bins[i])}")

    print("\nAll constraints are satisfied.")
    print(f"Solver status: {'Optimal' if status == cp_model.OPTIMAL else 'Feasible'}")
else:
    print("No solution found.")

------- Patients and Beds CSP ---

from ortools.sat.python import cp_model

def solve_patients_beds():
    model = cp_model.CpModel()

    # Beds and their capacities
    bed_types = ["ICU", "GeneralCare", "Private", "EmergencyCare"]
    bed_capacities = {
        "ICU": 2,
        "GeneralCare": 3,
        "Private": 1,
        "EmergencyCare": 2
    }

    # Patients and their preferences
    patients = ["A", "B", "C", "D", "E", "F"]
    preferences = {
        "A": ["ICU", "GeneralCare"],
        "B": ["GeneralCare", "Private"],
        "C": ["EmergencyCare", "GeneralCare"],
        "D": ["ICU", "Private"],
        "E": ["GeneralCare", "EmergencyCare"],
        "F": ["EmergencyCare", "Private"]
    }

    # Conflicting patients who cannot have the same bed type
    conflicts = [("A", "B"), ("C", "D")]

    # Variables: each patient is assigned a bed type (by index),
    # restricted to that patient's preferred bed types
    bed_indices = {bed: i for i, bed in enumerate(bed_types)}
    patient_vars = {}
    for patient in patients:
        domain = [bed_indices[bed] for bed in preferences[patient]]
        patient_vars[patient] = model.NewIntVarFromDomain(
            cp_model.Domain.FromValues(domain), patient)

    # Constraint 1: Capacity of each bed type
    for bed in bed_types:
        count = []
        for patient in patients:
            count.append(model.NewBoolVar(f"{patient}_{bed}"))
            model.Add(patient_vars[patient] == bed_indices[bed]).OnlyEnforceIf(count[-1])
            model.Add(patient_vars[patient] != bed_indices[bed]).OnlyEnforceIf(count[-1].Not())
        model.Add(sum(count) <= bed_capacities[bed])

    # Constraint 2: Conflicts (no same bed type)
    for p1, p2 in conflicts:
        model.Add(patient_vars[p1] != patient_vars[p2])

    # Solve
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    if status in [cp_model.FEASIBLE, cp_model.OPTIMAL]:
        print("\nPatient to Bed Assignments:")
        for patient in patients:
            bed = bed_types[solver.Value(patient_vars[patient])]
            print(f"Patient {patient} -> {bed}")
    else:
        print("No solution found.")

if __name__ == "__main__":
    solve_patients_beds()
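# Optional extension (a sketch): inside solve_patients_beds, after a solution
# is found, bed occupancy could be reported as well, e.g.:
#     for bed in bed_types:
#         used = sum(1 for p in patients
#                    if solver.Value(patient_vars[p]) == bed_indices[bed])
#         print(f"{bed}: {used}/{bed_capacities[bed]} beds used")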
--- Teachers and Classes CSP

from ortools.sat.python import cp_model

def solve_teachers_classes():
    model = cp_model.CpModel()

    # Teachers and Classes
    teachers = ["T1", "T2", "T3"]
    classes = ["C1", "C2", "C3"]

    # Teacher preferences/qualifications
    preferences = {
        "T1": ["C1", "C3"],
        "T2": ["C1", "C3"],       # T2 cannot teach C2
        "T3": ["C2", "C1", "C3"]  # T3 prefers C2
    }

    class_indices = {cls: i for i, cls in enumerate(classes)}
    teacher_vars = {}
    for teacher in teachers:
        domain = [class_indices[cls] for cls in preferences[teacher]]
        teacher_vars[teacher] = model.NewIntVarFromDomain(
            cp_model.Domain.FromValues(domain), teacher)

    # Constraint 1: Each class assigned to one teacher only
    # (AllDifferent over the teacher variables makes the assignment one-to-one,
    # so it only needs to be added once)
    model.AddAllDifferent([teacher_vars[teacher] for teacher in teachers])

    # Constraint 2: T3 must teach C2
    model.Add(teacher_vars["T3"] == class_indices["C2"])

    # Solve
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    if status in [cp_model.FEASIBLE, cp_model.OPTIMAL]:
        print("\nTeacher to Class Assignments:")
        for teacher in teachers:
            cls = classes[solver.Value(teacher_vars[teacher])]
            print(f"Teacher {teacher} -> {cls}")
    else:
        print("No solution found.")

if __name__ == "__main__":
    solve_teachers_classes()
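# Optional (a sketch): inside solve_teachers_classes, every feasible
# assignment could be enumerated with a CP-SAT solution callback, e.g.:
#     class Printer(cp_model.CpSolverSolutionCallback):
#         def __init__(self, variables):
#             super().__init__()
#             self._vars = variables
#         def on_solution_callback(self):
#             print({name: self.Value(v) for name, v in self._vars.items()})
#     solver.parameters.enumerate_all_solutions = True
#     solver.Solve(model, Printer(teacher_vars))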
-------------------------------------------- seating arrangement ---------------------------------------
from ortools.sat.python import cp_model

def solve_seating_arrangement():
    # Create the model
    model = cp_model.CpModel()

    # Seats: 0 to 5
    seats = range(6)
    panelists = ['Amir', 'Bella', 'Charles', 'Diana', 'Ethan', 'Farah']

    # Create decision variables: key=panelist, value=seat assignment (0-5)
    assignments = {}
    for p in panelists:
        assignments[p] = model.NewIntVar(0, 5, f'{p}_seat')

    # Each panelist must have a unique seat
    model.AddAllDifferent([assignments[p] for p in panelists])

    # Bella must sit to the left of Farah
    model.Add(assignments['Bella'] < assignments['Farah'])

    # Charles must sit next to Diana (absolute difference of 1).
    # Python's abs() cannot be applied to CP-SAT variables, so the
    # difference is constrained through AddAbsEquality instead.
    diff = model.NewIntVar(-5, 5, 'charles_diana_diff')
    model.Add(diff == assignments['Charles'] - assignments['Diana'])
    abs_diff = model.NewIntVar(1, 1, 'charles_diana_abs_diff')
    model.AddAbsEquality(abs_diff, diff)

    # Amir cannot sit at either end (not 0 or 5)
    model.Add(assignments['Amir'] != 0)
    model.Add(assignments['Amir'] != 5)

    # Ethan must be in seat 2 or 3 (middle)
    model.AddAllowedAssignments([assignments['Ethan']], [[2], [3]])

    # Diana cannot sit at either end (not 0 or 5)
    model.Add(assignments['Diana'] != 0)
    model.Add(assignments['Diana'] != 5)

    # Solve the model
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    # Print the solution
    if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
        print("Solution found:")
        seat_assignment = {s: None for s in seats}
        for p in panelists:
            seat = solver.Value(assignments[p])
            seat_assignment[seat] = p
            print(f"{p}: S{seat}")

        print("\nSeating arrangement (left to right):")
        for seat in sorted(seat_assignment.keys()):
            print(f"S{seat}: {seat_assignment[seat]}")

        # Verify constraints
        print("\nVerification:")
        bella_seat = solver.Value(assignments['Bella'])
        farah_seat = solver.Value(assignments['Farah'])
        print(f"- Bella (S{bella_seat}) is left of Farah (S{farah_seat}): {bella_seat < farah_seat}")

        charles_seat = solver.Value(assignments['Charles'])
        diana_seat = solver.Value(assignments['Diana'])
        print(f"- Charles (S{charles_seat}) is next to Diana (S{diana_seat}): {abs(charles_seat - diana_seat) == 1}")

        amir_seat = solver.Value(assignments['Amir'])
        print(f"- Amir is not at either end (S{amir_seat}): {amir_seat not in [0, 5]}")

        ethan_seat = solver.Value(assignments['Ethan'])
        print(f"- Ethan is in the middle (S{ethan_seat}): {ethan_seat in [2, 3]}")

        print(f"- Diana is not at either end (S{diana_seat}): {diana_seat not in [0, 5]}")

        print(f"\nSolver status: {'Optimal' if status == cp_model.OPTIMAL else 'Feasible'} solution found")
    else:
        print("No solution found")

if __name__ == '__main__':
    solve_seating_arrangement()

--------------------------------------------------------------------------
patients easy
from ortools.sat.python import cp_model

def solve_patients_beds():
    model = cp_model.CpModel()

    # Step 1: Define bed types and how many patients each can hold
    bed_types = ["ICU", "GeneralCare", "Private", "EmergencyCare"]
    bed_capacities = {
        "ICU": 2,
        "GeneralCare": 3,
        "Private": 1,
        "EmergencyCare": 2
    }

    # Step 2: Define patients and their bed preferences
    patients = ["A", "B", "C", "D", "E", "F"]
    preferences = {
        "A": ["ICU", "GeneralCare"],
        "B": ["GeneralCare", "Private"],
        "C": ["EmergencyCare", "GeneralCare"],
        "D": ["ICU", "Private"],
        "E": ["GeneralCare", "EmergencyCare"],
        "F": ["EmergencyCare", "Private"]
    }

    # Step 3: Define conflict pairs (patients who can't share the same bed type)
    conflicts = [("A", "B"), ("C", "D")]

    # Step 4: Convert bed types to numbers (e.g., "ICU" -> 0)
    bed_to_index = {bed: i for i, bed in enumerate(bed_types)}
    index_to_bed = {i: bed for bed, i in bed_to_index.items()}

    # Step 5: Create variables for each patient based on their preferences
    patient_vars = {}
    for patient in patients:
        allowed_beds = [bed_to_index[bed] for bed in preferences[patient]]
        patient_vars[patient] = model.NewIntVarFromDomain(
            cp_model.Domain.FromValues(allowed_beds), patient)

    # Step 6: Capacity constraints for each bed type
    for bed in bed_types:
        bed_index = bed_to_index[bed]
        # Boolean flags for whether each patient is assigned to this bed
        bed_flags = []
        for patient in patients:
            flag = model.NewBoolVar(f"{patient}_in_{bed}")
            model.Add(patient_vars[patient] == bed_index).OnlyEnforceIf(flag)
            model.Add(patient_vars[patient] != bed_index).OnlyEnforceIf(flag.Not())
            bed_flags.append(flag)
        model.Add(sum(bed_flags) <= bed_capacities[bed])  # limit patients per bed type

    # Step 7: Add conflict constraints (conflicting patients can't have the same bed)
    for p1, p2 in conflicts:
        model.Add(patient_vars[p1] != patient_vars[p2])

    # Step 8: Solve the model
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    # Step 9: Print the result
    if status in (cp_model.FEASIBLE, cp_model.OPTIMAL):
        print("\nPatient to Bed Assignments:")
        for patient in patients:
            assigned_bed = index_to_bed[solver.Value(patient_vars[patient])]
            print(f"Patient {patient} → {assigned_bed}")
    else:
        print("No solution found.")

# Run the function
solve_patients_beds()

------------------------------------------------------------------------
a
seats = {
'Amir': model.NewIntVar(0, 5, 'Amir'),
'Bella': model.NewIntVar(0, 5, 'Bella'),
'Charles': model.NewIntVar(0, 5, 'Charles'),
'Diana': model.NewIntVar(0, 5, 'Diana'),
'Ethan': model.NewIntVar(0, 5, 'Ethan'),
'Farah': model.NewIntVar(0, 5, 'Farah')
}
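# Note: fragment (a) above assumes a CP-SAT model already exists; a minimal
# preamble to make it runnable would be:
#   from ortools.sat.python import cp_model
#   model = cp_model.CpModel()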

b
from ortools.sat.python import cp_model

def solve_seating():
    model = cp_model.CpModel()

    # Panelists
    panelists = ['Amir', 'Bella', 'Charles', 'Diana', 'Ethan', 'Farah']

    # Create variables: each panelist is assigned a seat from 0 to 5
    seats = {p: model.NewIntVar(0, 5, p) for p in panelists}

    # Constraint 1: All panelists must be in different seats
    model.AddAllDifferent(seats.values())

    # Constraint 2: Bella must sit to the left of Farah
    model.Add(seats['Bella'] < seats['Farah'])

    # Constraint 3: Charles sits next to Diana
    # (the absolute seat difference is fixed to 1)
    model.AddAbsEquality(model.NewIntVar(1, 1, 'charles_diana_diff'),
                         seats['Charles'] - seats['Diana'])

    # Constraint 4: Amir cannot be on either end
    model.Add(seats['Amir'] != 0)
    model.Add(seats['Amir'] != 5)

    # Constraint 5: Ethan must be in seat 2 or 3
    model.AddAllowedAssignments([seats['Ethan']], [[2], [3]])

    # Constraint 6: Diana must not be at seat 0 or 5
    model.Add(seats['Diana'] != 0)
    model.Add(seats['Diana'] != 5)

    # Solve the model
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    # Print results
    if status in (cp_model.FEASIBLE, cp_model.OPTIMAL):
        print("Solution Found:\n")
        seat_to_person = [''] * 6
        for person, var in seats.items():
            seat_to_person[solver.Value(var)] = person
        for i, person in enumerate(seat_to_person):
            print(f"Seat {i}: {person}")
        print(f"\nSolver status: {'OPTIMAL' if status == cp_model.OPTIMAL else 'FEASIBLE'}")
    else:
        print("No solution found.")

# Run the function
solve_seating()
