import os
import pandas as pd
from datetime import datetime
#from openpyxl import load_workbook
from IPython.display import FileLink, display
from Eversense4_API_2 import Eversense4_API
import numpy as np
import random
from sklearn.preprocessing import MinMaxScaler
import pickle
import gradio as gr
import plotly.graph_objects as go

# Load credentials from environment variables (falling back to the hard-coded defaults)
USERNAME = os.getenv("EVERSENSE_USERNAME", "pgautam@forbesmarshall.com")
PASSWORD = os.getenv("EVERSENSE_PASSWORD", "pranjal123@")

# Initialize API object
apiObj = Eversense4_API(host="https://eversense.forbesmarshall.com")

# Log in
apiObj.login(USERNAME, PASSWORD)

# Define start and end timestamps
start = datetime(2024, 12, 1)  # Fixed start date
end = datetime.now()           # Current time
startTS = int(start.timestamp() * 1000)  # Convert to milliseconds
endTS = int(end.timestamp() * 1000)
#print("Start Timestamp:", startTS)
#print("End Timestamp:", endTS)

# Fetch telemetry data
keys = [
    'CLD_TTL_JUICE_FLOW',
    'CLD_SK1_FM1_FLOW',
    'CLD_SK2_FM1_FLOW',
    'CLD_SK3_FM1_FLOW',
    'CLD_SK4_FM1_FLOW',
    'CLD_SK5_FM1_FLOW',
    'CLD_FT_TPS'
]
telemetry_response = apiObj.getDeviceDataValues(
    '51f7d6d0-9dd0-11ef-9e72-3915b0e66e63',
    keys=keys,
    startTS=startTS,
    endTS=endTS,
    interval=60000,
    limit=500000
)

# Process data
df = apiObj.processData(telemetry_response)

# Convert all columns except 'timestamp' to float
if 'timestamp' in df.columns:
    df.loc[:, df.columns != 'timestamp'] = df.loc[:, df.columns != 'timestamp'].apply(pd.to_numeric, errors='coerce')
else:
    df = df.apply(pd.to_numeric, errors='coerce')

df = df.drop(columns=['Timestamp', 'ts'], errors='ignore')  # errors='ignore' avoids a KeyError if either column is absent
df.reset_index(inplace=True)

# Define the column mapping
column_mapping = {
    'CLD_TTL_JUICE_FLOW': 'total_juice_flow',
    'CLD_SK1_FM1_FLOW': 'SK1_juice_flow',
    'CLD_SK2_FM1_FLOW': 'SK2_juice_flow',
    'CLD_SK3_FM1_FLOW': 'SK3_juice_flow',
    'CLD_SK4_FM1_FLOW': 'SK4_juice_flow',
    'CLD_SK5_FM1_FLOW': 'SK5_juice_flow',
    'CLD_FT_TPS': 'total_steam_flow',
    'Timestamp': 'Timestamp'
}

# Rename the columns in the DataFrame
df.rename(columns=column_mapping, inplace=True)
#df

# Steam economy = juice flow per unit of steam
df['Steam_Economy'] = df.apply(
    lambda row: row['total_juice_flow'] / row['total_steam_flow'] if row['total_steam_flow'] != 0 else None,
    axis=1
)

# Columns used for validity checks and outlier detection
columns_to_check = ['SK1_juice_flow', 'SK2_juice_flow', 'SK3_juice_flow', 'SK4_juice_flow',
                    'SK5_juice_flow', 'total_juice_flow', 'total_steam_flow', 'Steam_Economy']

# Function to remove outliers using IQR
def remove_outliers_iqr(df, columns):
    for col in columns:
        Q1 = df[col].quantile(0.25)  # First quartile (25th percentile)
        Q3 = df[col].quantile(0.75)  # Third quartile (75th percentile)
        IQR = Q3 - Q1                # Interquartile range
        lower_bound = Q1 - 1.5 * IQR
        upper_bound = Q3 + 1.5 * IQR
        # Remove rows where the column value is outside the IQR range
        df = df[(df[col] >= lower_bound) & (df[col] <= upper_bound)]
    return df

# Remove negative values first
df = df[(df[columns_to_check] >= 0).all(axis=1)]

# Apply IQR outlier removal
df = remove_outliers_iqr(df, columns_to_check)

# Allocation of steam
# Define operating steam ranges
steam_limits = {
    "SK1_steam_flow": (110, 140),
    "SK2_steam_flow": (90, 110),
    "SK3_steam_flow": (90, 110),
    "SK4_steam_flow": (80, 100),
    "SK5_steam_flow": (110, 140),
}
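# The IQR filter above drops any row whose value falls outside
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for every checked column. The small sketch
# below is purely illustrative (made-up values and a hypothetical helper
# name, not part of the production pipeline); uncomment the call to run it.
def _demo_remove_outliers_iqr():
    demo = pd.DataFrame({
        "SK1_juice_flow": [100, 102, 98, 101, 99, 500],   # 500 is an obvious spike
        "total_steam_flow": [40, 41, 39, 40, 42, 40],
    })
    cleaned = remove_outliers_iqr(demo, ["SK1_juice_flow"])
    print(cleaned)  # The 500 TPH spike is removed; the 98-102 readings are kept

# _demo_remove_outliers_iqr()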
# Function to distribute steam dynamically
def distribute_steam(df):
    df = df.copy()  # Work on a copy to avoid modifying the original dataframe

    # Create new columns for steam flows
    for sk in ["SK1_steam_flow", "SK2_steam_flow", "SK3_steam_flow", "SK4_steam_flow", "SK5_steam_flow"]:
        df[sk] = 0.0

    for i, row in df.iterrows():
        total_steam = row["total_steam_flow"]

        # Get juice flow values
        juice_flows = {
            "SK1_juice_flow": row["SK1_juice_flow"],
            "SK2_juice_flow": row["SK2_juice_flow"],
            "SK3_juice_flow": row["SK3_juice_flow"],
            "SK4_juice_flow": row["SK4_juice_flow"],
            "SK5_juice_flow": row["SK5_juice_flow"],
        }

        # Identify operational SKs (juice flow > 20)
        operational_sks = [sk for sk, val in juice_flows.items() if val > 20]

        # Ensure exactly 3 SKs are operational
        if len(operational_sks) > 3:
            operational_sks = operational_sks[:3]
        elif len(operational_sks) < 3:
            continue  # Skip row if fewer than 3 SKs are operational

        # Decide which of SK1 or SK5 should run
        if "SK1_juice_flow" in operational_sks and "SK5_juice_flow" in operational_sks:
            if row["SK1_juice_flow"] > row["SK5_juice_flow"]:
                operational_sks.remove("SK5_juice_flow")
            else:
                operational_sks.remove("SK1_juice_flow")

        # Allocate steam based on distribution percentages
        steam_distribution = {}
        if "SK1_juice_flow" in operational_sks:
            steam_distribution["SK1_steam_flow"] = 0.5 * total_steam
        elif "SK5_juice_flow" in operational_sks:
            steam_distribution["SK5_steam_flow"] = 0.5 * total_steam

        if "SK4_juice_flow" in operational_sks:
            steam_distribution["SK4_steam_flow"] = 0.2 * total_steam

        remaining_steam = total_steam - sum(steam_distribution.values())
        sk2_sk3_count = sum(1 for sk in ["SK2_juice_flow", "SK3_juice_flow"] if sk in operational_sks)
        if sk2_sk3_count > 0:
            per_sk_steam = remaining_steam / sk2_sk3_count
            if "SK2_juice_flow" in operational_sks:
                steam_distribution["SK2_steam_flow"] = per_sk_steam
            if "SK3_juice_flow" in operational_sks:
                steam_distribution["SK3_steam_flow"] = per_sk_steam

        # Adjust for steam limits dynamically
        total_allocated = sum(steam_distribution.values())
        if total_allocated > total_steam:
            factor = total_steam / total_allocated
            steam_distribution = {k: v * factor for k, v in steam_distribution.items()}

        # Ensure individual steam values lie within their respective limits
        for sk, steam_value in steam_distribution.items():
            min_val, max_val = steam_limits[sk]
            if steam_value < min_val:
                steam_distribution[sk] = min_val
            elif steam_value > max_val:
                steam_distribution[sk] = max_val

        # Normalize again to ensure the sum equals total_steam_flow
        total_allocated = sum(steam_distribution.values())
        if total_allocated > 0:
            factor = total_steam / total_allocated
            steam_distribution = {k: v * factor for k, v in steam_distribution.items()}

        # Assign values to the DataFrame
        for sk, val in steam_distribution.items():
            df.at[i, sk] = float(val)

    return df

# Apply the function
df = distribute_steam(df)
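# Quick illustration of distribute_steam on a single hypothetical row (the
# juice/steam figures below are made up, not taken from the plant data):
# SK1, SK2 and SK3 are the three operational stations, so SK1 gets 50% of the
# total steam, SK2/SK3 split the remainder, and the result is clamped to
# steam_limits and rescaled so the per-SK values sum back to the total.
# Uncomment the call to run it.
def _demo_distribute_steam():
    demo_row = pd.DataFrame([{
        "total_steam_flow": 300.0,
        "SK1_juice_flow": 150.0, "SK2_juice_flow": 100.0, "SK3_juice_flow": 90.0,
        "SK4_juice_flow": 10.0, "SK5_juice_flow": 5.0,
    }])
    allocated = distribute_steam(demo_row)
    steam_cols = ["SK1_steam_flow", "SK2_steam_flow", "SK3_steam_flow",
                  "SK4_steam_flow", "SK5_steam_flow"]
    # SK1 ~= 131.25, SK2/SK3 ~= 84.38 after clamping and rescaling; SK4/SK5 stay 0
    print(allocated[steam_cols].round(2))

# _demo_distribute_steam()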
# Set random seeds for reproducibility
random.seed(42)     # Choose a seed value
np.random.seed(42)  # Ensure numpy randomness is also controlled


class QLearningOptimizer:
    def __init__(self, alpha=0.1, gamma=0.9, epsilon=0.2):
        self.alpha = alpha      # Learning rate
        self.gamma = gamma      # Discount factor
        self.epsilon = epsilon  # Exploration rate
        self.q_table = {}       # Q-table as a dictionary

    def get_q_values(self, state):
        """Retrieve Q-values for a given state, initialize if unseen"""
        return self.q_table.setdefault(state, np.zeros(3))  # 3 actions (decrease, maintain, increase)

    def choose_action(self, state):
        """Epsilon-greedy action selection"""
        if random.uniform(0, 1) < self.epsilon:
            return random.choice([-1])  # Only explore: decrease steam flow
        else:
            return np.argmax(self.get_q_values(state)) - 1  # Exploit: best learned action

    def update_q_table(self, state, action, reward, next_state):
        """Update Q-values using the Bellman equation"""
        q_values = self.get_q_values(state)
        next_q_values = self.get_q_values(next_state)
        best_next_q = np.max(next_q_values)
        q_values[action + 1] = q_values[action + 1] + self.alpha * (reward + self.gamma * best_next_q - q_values[action + 1])
        self.q_table[state] = q_values  # Store updated Q-values

    def save_model(self, filename="q_table.pkl"):
        with open(filename, "wb") as f:
            pickle.dump(self.q_table, f)

    def load_model(self, filename="q_table.pkl"):
        with open(filename, "rb") as f:
            self.q_table = pickle.load(f)


def calculate_reward(steam_economy):
    """Reward function based on Steam Economy closeness to 2.75"""
    return -abs(steam_economy - 2.75)  # Negative deviation penalty
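# Toy check of the reward shaping and a single Q-update (hypothetical states,
# not plant data): the reward is zero when Steam_Economy hits the 2.75 target
# and grows more negative as the economy drifts from it, and one Bellman
# update nudges the Q-value of the "decrease" action towards that reward.
# Uncomment the call to run it.
def _demo_q_update():
    print(calculate_reward(2.75))  # -0.0: no penalty when exactly on target
    print(calculate_reward(2.50))  # -0.25: penalised in proportion to the deviation

    toy = QLearningOptimizer()
    state = (120.0, 95.0, 92.0, 307.0)       # (SK flows..., total steam flow)
    next_state = (108.0, 84.0, 81.0, 293.0)  # state after the recommended reduction
    toy.update_q_table(state, action=-1, reward=calculate_reward(2.50), next_state=next_state)
    # Index 0 (the "decrease" action) moves from 0 towards -0.25 by one alpha step
    print(toy.get_q_values(state))

# _demo_q_update()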
def optimize_steam_flow(df):
    """Optimize steam flow using Q-learning"""
    ql = QLearningOptimizer()
    if os.path.exists("q_table.pkl"):
        ql.load_model()  # Reuse any previously saved Q-table so learning accumulates across runs
    latest_data = df.iloc[-1]  # Get the latest timestamp data

    # Identify operational SKs (steam flow > 0)
    operational_sks = [sk for sk in ['SK1_steam_flow', 'SK2_steam_flow', 'SK3_steam_flow',
                                     'SK4_steam_flow', 'SK5_steam_flow'] if latest_data[sk] > 0]

    if len(operational_sks) != 3:
        print("Warning: Data should have exactly 3 operational SKs.")
        return None

    total_steam_flow = latest_data['total_steam_flow']
    steam_economy = latest_data['Steam_Economy']

    # Build the state from the operational SK flows and the total steam flow
    state = tuple([latest_data[sk] for sk in operational_sks] + [total_steam_flow])

    # Choose actions for each operational SK (only decreases are allowed)
    recommendations = {}
    for sk in operational_sks:
        action = -1  # Force reduction
        reduction = random.randint(10, 15)  # Ensure reduction is between 10 and 15
        recommended_value = max(0, latest_data[sk] - reduction)  # Prevent negative values
        recommendations[sk] = recommended_value

    # Ensure the total recommended steam flow is reduced
    total_reduction = random.randint(10, 15)
    recommended_total_steam_flow = max(0, total_steam_flow - total_reduction)

    # Adjust individual SK flows proportionally if needed
    total_recommended_sk_flow = sum(recommendations.values())
    difference = recommended_total_steam_flow - total_recommended_sk_flow
    if abs(difference) > 0:
        # If there's a mismatch, adjust proportionally
        sk_adjustments = {
            sk: recommendations[sk] + (difference * (recommendations[sk] / total_recommended_sk_flow))
            for sk in operational_sks
        }
        recommendations = {sk: max(0, round(val)) for sk, val in sk_adjustments.items()}  # Ensure no negatives

    # Ensure the total recommended steam flow is strictly less than the current one
    assert sum(recommendations.values()) < total_steam_flow, "Error: Steam consumption should be reduced!"

    # New state after taking actions
    new_state = tuple(list(recommendations.values()) + [recommended_total_steam_flow])
    reward = calculate_reward(steam_economy)

    # Update Q-table for each SK independently
    for sk in operational_sks:
        ql.update_q_table(state, action, reward, new_state)

    # Save the updated model
    ql.save_model()

    # Return current and recommended steam flow values
    return {
        "current": {sk: latest_data[sk] for sk in operational_sks},
        "recommended": recommendations,
        "current_total_steam_flow": total_steam_flow,
        "recommended_total_steam_flow": sum(recommendations.values())  # Ensure it matches
    }


# Call the function with your dataframe 'df'
result = optimize_steam_flow(df)

# Display result
# if result:
#     print("Current Steam Flow:", result["current"])
#     print("Recommended Steam Flow:", result["recommended"])
#     print("Current Total Steam Flow:", result["current_total_steam_flow"])
#     print("Recommended Total Steam Flow:", result["recommended_total_steam_flow"])


# Use the 'optimize_steam_flow' function and other helper functions from the previous section
def create_gradio_interface(df):
    def display_results():
        result = optimize_steam_flow(df)  # Call the Q-learning optimization
        if result:
            # Prepare bar chart data for recommended steam flows
            recommended_steam_flows = list(result["recommended"].values())
            sks = list(result["recommended"].keys())

            # Plotly bar chart for recommended steam flow per SK
            bar_chart = go.Figure(data=[
                go.Bar(
                    x=sks,
                    y=recommended_steam_flows,
                    marker_color='skyblue',
                    text=[f"{val:.2f} TPH" for val in recommended_steam_flows],
                    textposition='auto'
                )
            ])
            bar_chart.update_layout(
                title='Recommended Steam Flow per SK (TPH)',
                xaxis_title='SK',
                yaxis_title='Steam Flow (TPH)',
                template='plotly_dark',
                plot_bgcolor='#121212',
                paper_bgcolor='#121212',
                font=dict(color='white'),
                margin=dict(l=50, r=50, t=50, b=50)
            )

            # Comparison chart (current vs. recommended total steam flow)
            total_steam_flow = result["current_total_steam_flow"]
            recommended_total_steam_flow = result["recommended_total_steam_flow"]

            # Plotly comparison bar chart for total steam flow
            comparison_chart = go.Figure(data=[
                go.Bar(
                    x=['Current', 'Recommended'],
                    y=[total_steam_flow, recommended_total_steam_flow],
                    marker_color=['orange', 'lightgreen'],
                    text=[f"{total_steam_flow:.2f} TPH", f"{recommended_total_steam_flow:.2f} TPH"],
                    textposition='auto'
                )
            ])
            comparison_chart.update_layout(
                title='Total Steam Flow Comparison (TPH)',
                xaxis_title='Steam Flow Type',
                yaxis_title='Steam Flow (TPH)',
                template='plotly_dark',
                plot_bgcolor='#121212',
                paper_bgcolor='#121212',
                font=dict(color='white'),
                margin=dict(l=50, r=50, t=50, b=50)
            )

            # Steam flow summary
            summary = {
                "Current Total Steam Flow (TPH)": f"{total_steam_flow:.2f}",
                "Recommended Total Steam Flow (TPH)": f"{recommended_total_steam_flow:.2f}",
                "Recommended Steam Flows for SKs (TPH)": {k: f"{v:.2f}" for k, v in result["recommended"].items()}
            }

            # Custom HTML to style the summary in a visually appealing way with white text
            summary_html = f"""
            <div style="color: white;">
                <p><b>Current Total Steam Flow (TPH):</b> {summary["Current Total Steam Flow (TPH)"]} TPH</p>
                <p><b>Recommended Total Steam Flow (TPH):</b> {summary["Recommended Total Steam Flow (TPH)"]} TPH</p>
                <p><b>Recommended Steam Flows for SKs (TPH):</b></p>