tests RL
parent d9ac469a41
commit 64c96749d6
@@ -0,0 +1,187 @@
import mininet
import time, socket, random
import numpy as np
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.link import TCLink
import matplotlib.pyplot as plt

# Define custom topology
class MyTopology(Topo):
    def build(self):
        # Create switches
        s1 = self.addSwitch('s1')
        s2 = self.addSwitch('s2')

        # Create hosts
        h1 = self.addHost('h1')
        h2 = self.addHost('h2')

        # Add links
        self.addLink(h1, s1, cls=TCLink, delay='10ms', bw=1)
        self.addLink(s1, s2, cls=TCLink, delay='50ms', bw=0.5)
        self.addLink(s2, h2, cls=TCLink, delay='10ms', bw=1)
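
# Illustrative sketch (an assumption, not part of the original commit): a quick
# sanity check that the links above behave as configured; the end-to-end RTT
# between h1 and h2 should be roughly 2 * (10 + 50 + 10) = 140 ms.
def check_topology(net):
    h1, h2 = net.get('h1'), net.get('h2')
    print(h1.cmd('ping -c 3 %s' % h2.IP()))  # expect ~140 ms round-trip times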

# Define TCP agent
class MyTCPAgent:
    def __init__(self):
        # Initialize TCP agent
        self.transmission_rounds = []
        self.congestion_window_sizes = []

    def handle_connection(self):
        print("TCP connection establishment")
        # Implement TCP connection establishment

        # Example TCP connection establishment using a socket
        # self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.sock.connect(('localhost', 19191))

        # Example TCP connection establishment simulation
        time.sleep(1)  # Simulating connection establishment delay

    def handle_data_transfer(self, tunnel, window_size):
        print("Performing data transfer with tunnel:", tunnel, "and window size:", window_size)
        # Implement TCP data transfer with the given tunnel and window size
        # using socket programming

        # Placeholder logic: simulate the data transfer delay
        time.sleep(0.1)

        # Simulate congestion control by waiting for a fixed amount of time
        time.sleep(0.1)

        # Record transmission round and final congestion window size
        self.transmission_rounds.append(len(self.transmission_rounds) + 1)
        self.congestion_window_sizes.append(window_size)
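
# Illustrative sketch (an assumption, not part of the original commit): instead
# of the time.sleep() placeholders above, real traffic could be generated with
# iperf between the Mininet hosts created by create_network() below.
def real_data_transfer(net, duration=1):
    h1, h2 = net.get('h1'), net.get('h2')
    h2.cmd('iperf -s &')                                        # start an iperf server on h2
    report = h1.cmd('iperf -c %s -t %d' % (h2.IP(), duration))  # run the client on h1
    h2.cmd('kill %iperf')                                       # stop the server
    return report                                               # raw iperf output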

# Define RL agent for tunnel selection
class TunnelSelectionAgent:
    def __init__(self, tunnels):
        # Initialize your RL agent for tunnel selection
        self.rewards = []
        self.selected_tunnels = []
        self.tunnels = tunnels
        self.current_tunnel = 0

    def select_tunnel(self):
        # Implement tunnel selection based on RL policy
        # Placeholder logic: cycle through the tunnels round-robin
        selected_tunnel = self.tunnels[self.current_tunnel]
        self.current_tunnel = (self.current_tunnel + 1) % len(self.tunnels)
        return selected_tunnel

    def update_policy(self, reward):
        # Implement RL policy update based on rewards
        self.rewards.append(reward)
        # Record the tunnel the reward refers to (the one just used) rather than
        # calling select_tunnel() again, which would advance the round-robin
        # pointer a second time
        self.selected_tunnels.append(self.tunnels[(self.current_tunnel - 1) % len(self.tunnels)])
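
# Illustrative sketch (an assumption, not the original commit's policy): the
# round-robin placeholder above could be replaced by an epsilon-greedy bandit
# that tracks the average reward observed per tunnel.
class EpsilonGreedyTunnelAgent:
    def __init__(self, tunnels, epsilon=0.2):
        self.tunnels = tunnels
        self.epsilon = epsilon
        self.totals = {t: 0.0 for t in tunnels}  # summed reward per tunnel
        self.counts = {t: 0 for t in tunnels}    # times each tunnel was used
        self.last = None

    def select_tunnel(self):
        # Explore with probability epsilon; otherwise pick the tunnel with the
        # best average reward so far (untried tunnels win via float('inf'))
        if random.random() < self.epsilon:
            self.last = random.choice(self.tunnels)
        else:
            self.last = max(self.tunnels, key=lambda t:
                            self.totals[t] / self.counts[t] if self.counts[t] else float('inf'))
        return self.last

    def update_policy(self, reward):
        self.totals[self.last] += reward
        self.counts[self.last] += 1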

# Define RL agent for window prediction
class WindowPredictionAgent:
    def __init__(self):
        # Initialize the RL agent for window prediction
        self.window_sizes = [1, 2, 4, 8, 16, 32, 64]  # Possible window sizes
        self.alpha = 0.1   # Learning rate
        self.gamma = 0.9   # Discount factor
        self.q_table = {}  # Q-table to store state-action values
        self.prev_state = None   # Last state acted on, needed for the Q-update
        self.prev_action = None  # Last action taken in that state

    def predict_window_size(self):
        # Get the current state (e.g., network conditions)
        state = self.get_state()

        # Initialize Q-values for all possible actions in an unseen state
        if state not in self.q_table:
            self.q_table[state] = {window_size: 0 for window_size in self.window_sizes}

        # Choose the action (window size) with an epsilon-greedy policy
        if random.random() < 0.2:  # Exploration (20% of the time)
            action = random.choice(self.window_sizes)
        else:                      # Exploitation (80% of the time)
            action = self.get_best_action(state)

        # Remember the state-action pair so update_policy() can credit it
        self.prev_state, self.prev_action = state, action
        return action

    def get_state(self):
        # Implement the logic to determine the current state based on network
        # conditions, e.g., round-trip time, packet loss rate, or congestion signals
        # Placeholder logic: return a random state
        return random.randint(1, 10)

    def get_best_action(self, state):
        # Find the action (window size) with the highest Q-value for the given state
        return max(self.q_table[state], key=self.q_table[state].get)

    def update_policy(self, reward):
        # Update the Q-value based on the reward received after taking an action
        if self.prev_state is None:
            return  # No action has been taken yet

        # Get the current state
        curr_state = self.get_state()

        # Initialize Q-values for all possible actions in an unseen state
        if curr_state not in self.q_table:
            self.q_table[curr_state] = {window_size: 0 for window_size in self.window_sizes}

        # Update the Q-value using the Q-learning update rule
        max_q_value = max(self.q_table[curr_state].values())  # Maximum Q-value for the current state
        self.q_table[self.prev_state][self.prev_action] += self.alpha * (
            reward + self.gamma * max_q_value - self.q_table[self.prev_state][self.prev_action])
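
# For reference, the update above is standard Q-learning:
#   Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))
# Worked example with the values above (alpha=0.1, gamma=0.9): if Q(s, a) = 0,
# r = 0.5 and max_a' Q(s', a') = 1.0, the new value is 0.1 * (0.5 + 0.9) = 0.14.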

# Create the Mininet network with custom topology
def create_network():
    topo = MyTopology()
    net = Mininet(topo=topo)
    net.start()
    return net

# Main function
if __name__ == '__main__':
    setLogLevel('info')

    # Create the Mininet network
    net = create_network()

    # Instantiate TCP and RL agents
    tcp_agent = MyTCPAgent()
    tunnels = ['myPrivate1']
    tunnel_agent = TunnelSelectionAgent(tunnels)
    window_agent = WindowPredictionAgent()

    # Perform TCP connection establishment
    tcp_agent.handle_connection()

    # Perform data transfer with RL-based tunnel selection and window prediction
    start_time = time.time()

    while True:
        if time.time() - start_time > 10:  # End packet exchange after 10 seconds
            break
        tunnel = tunnel_agent.select_tunnel()
        window_size = window_agent.predict_window_size()

        # Perform data transfer
        tcp_agent.handle_data_transfer(tunnel, window_size)

        # Update RL agents based on rewards
        reward = 0.5  # Placeholder reward value
        tunnel_agent.update_policy(reward)
        # window_agent.update_policy(reward)
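
    # Illustrative alternative (an assumption, not in the original commit): the
    # constant reward could instead be computed from measurements, e.g.
    #   reward = throughput_mbps / bottleneck_bw - 0.5 * (rtt_ms / max_rtt)
    # with throughput and RTT taken from iperf/ping runs on the hosts.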

    # Stop the Mininet network
    net.stop()

    # Plot Transmission Round versus Congestion Window Size
    plt.plot(tcp_agent.transmission_rounds, tcp_agent.congestion_window_sizes)
    plt.xlabel('Transmission Round')
    plt.ylabel('Congestion Window Size')
    plt.title('TCP+RL Window Prediction')
    plt.show()
@@ -0,0 +1,109 @@
import heapq
import random
from mininet.net import Mininet
from mininet.node import OVSSwitch, Controller, Node
from mininet.topo import Topo
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.log import setLogLevel

class MyRouter(Node):
    def config(self, **params):
        super(MyRouter, self).config(**params)
        self.cmd('sysctl net.ipv4.ip_forward=1')  # Enable forwarding on the router

    def terminate(self):
        self.cmd('sysctl net.ipv4.ip_forward=0')  # Disable forwarding on the router
        super(MyRouter, self).terminate()
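
# Note: the commit defines MyRouter but never adds it to the topology. A
# hypothetical usage inside create_topology() below (an assumption) would be:
#   router = topo.addNode('r1', cls=MyRouter, ip='10.0.0.1/24')
#   topo.addLink(site1, router)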

def create_topology():
    topo = Topo()

    # Create the SD-WAN sites
    site1 = topo.addHost('site1')
    site2 = topo.addHost('site2')
    site3 = topo.addHost('site3')

    # Create the virtual SD-WAN devices
    sdwan1 = topo.addHost('sdwan1')
    sdwan2 = topo.addHost('sdwan2')

    # Create switches
    switch1 = topo.addSwitch('s1')

    # Connect the hosts and switches
    topo.addLink(site1, switch1, bw=10, delay='10ms')
    topo.addLink(site2, switch1, bw=5, delay='20ms')
    topo.addLink(site3, switch1, bw=8, delay='15ms')
    topo.addLink(sdwan1, switch1, bw=100, delay='1ms')
    topo.addLink(sdwan2, switch1, bw=100, delay='1ms')

    return topo

# Select the best path based on real-time conditions, using the Dijkstra
# algorithm with QoS metrics as edge weights
def dynamic_path_selection(net, link_qos):

    # Add a random factor to the QoS metric
    random_range = 4
    for link in link_qos:
        link_qos[link] += random.randint(-random_range, random_range)

    # Build an adjacency map from the Mininet links, weighting each edge by the
    # QoS value of its first interface (defaulting to 1 if unknown)
    graph = {node.name: [] for node in net.values()}
    for link in net.links:
        n1, n2 = link.intf1.node.name, link.intf2.node.name
        qos = link_qos.get(link.intf1.name, 1)
        graph[n1].append((n2, qos))
        graph[n2].append((n1, qos))

    # Perform Dijkstra algorithm to find the best path based on QoS metrics
    def dijkstra(source):
        distance = {node: float('inf') for node in graph}
        distance[source] = 0
        queue = [(0, source)]
        while queue:
            dist, node = heapq.heappop(queue)
            if dist > distance[node]:
                continue
            for neighbor, qos in graph[node]:
                new_dist = dist + qos
                if new_dist < distance[neighbor]:
                    distance[neighbor] = new_dist
                    heapq.heappush(queue, (new_dist, neighbor))
        return distance

    # Run Dijkstra algorithm from each site to determine the best path
    site1_distance = dijkstra('site1')
    site2_distance = dijkstra('site2')
    site3_distance = dijkstra('site3')
    sdwan1_distance = dijkstra('sdwan1')
    sdwan2_distance = dijkstra('sdwan2')

    # Select the source whose distance map has the minimum total QoS distance
    best_path = min(site1_distance, site2_distance, site3_distance,
                    sdwan1_distance, sdwan2_distance,
                    key=lambda x: sum(x.values()))
    print("Best path based on QoS metrics:")
    for node, distance in best_path.items():
        print(f"Node: {node}, Total QoS Distance: {distance}")
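
# For intuition (using the pre-perturbation example values from run_topology()
# below): every path in this star topology crosses s1, so dijkstra('site1')
# gives site1 -> s1 a distance of 8 ('site1-eth0') and site1 -> site2 a
# distance of 8 + 6 = 14 ('site1-eth0' + 'site2-eth0').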

def run_topology():
    setLogLevel('info')  # Other logging levels are 'warning', 'error', 'debug'
    topo = create_topology()
    net = Mininet(topo=topo, link=TCLink)
    # net.addController('c0', controller=Controller)  # No need to add it explicitly

    net.start()  # Starting the network
    # Define QoS metrics for each link (example values); a random factor is
    # added inside dynamic_path_selection()
    link_qos = {
        'site1-eth0': 8,    # QoS value for link site1 -> switch1
        'site2-eth0': 6,    # QoS value for link site2 -> switch1
        'site3-eth0': 9,    # QoS value for link site3 -> switch1
        'sdwan1-eth0': 10,  # QoS value for link sdwan1 -> switch1
        'sdwan2-eth0': 7    # QoS value for link sdwan2 -> switch1
    }

    dynamic_path_selection(net, link_qos)
    CLI(net)
    net.stop()  # Stopping the network


if __name__ == '__main__':
    run_topology()