# Eigenvalue Packing/Unpacking
# Some will ask: "what's going on with the clock?" No tricks, just logic. See the VHDL
# listing at the bottom for a conceptual virtual-memory address range far larger than
# the physical RAM behind it.
# adi_comm_protocol.py
# (underscores, not hyphens, so the client and server can import this module)
# This module defines the communication protocol and data handling functions
# for the Adi Server and its clients.
import socket
import struct
import json
import numpy as np
# --- Operation Constants (for interoperability) ---
OPERATION_INTERPOLATE_HYPERBOLIC_PARABOLIC = 1
OPERATION_INTERPOLATE_ARCSECANT_STREAM = 2
OPERATION_EIGENVALUE_PACKING = 3
# --- Helper functions for data transmission ---
def send_data(conn, operation_type, data, metadata=None):
"""
Sends structured data over a socket.
The function first packs the data and a header into a JSON object,
then sends the JSON size and the JSON itself. Following the JSON, it
sends any associated binary data (e.g., from numpy arrays).
    Args:
        conn (socket.socket): The socket to send data through.
        operation_type (int): The type of operation being requested.
        data (dict): A dictionary containing 'result' or other data to send.
        metadata (dict, optional): Extra header fields (e.g. 'matrix_shape')
            forwarded verbatim to the receiver.
    """
    # Serialize any numpy arrays into one binary payload; the JSON header only
    # carries offsets/lengths ("pointers") into that payload.
    binary_data = b''
    data_pointers = {}
    data = dict(data)  # shallow copy so the caller's dictionary is not mutated
    for key, val in data.items():
        if isinstance(val, np.ndarray):
            # Convert the numpy array to raw float32 bytes
            array_bytes = val.astype(np.float32).tobytes()
            data_pointers[key] = {
                'offset': len(binary_data),
                'length': len(array_bytes),
                'dtype': 'float32'
            }
            binary_data += array_bytes
            data[key] = None  # arrays are not JSON-serializable; the pointer entry replaces them
# Create the JSON header with the operation type and data pointers
    json_header = {
        "operation": operation_type,
        "data": data,
        "data_pointers": data_pointers
    }
    if metadata is not None:
        json_header["metadata"] = metadata
# Serialize the header to JSON bytes
json_header_bytes = json.dumps(json_header).encode('utf-8')
header_size = len(json_header_bytes)
# Send the size of the header, the header itself, and the binary data
conn.sendall(struct.pack('!I', header_size))
conn.sendall(json_header_bytes)
conn.sendall(binary_data)
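
# A minimal sketch of what send_data puts on the wire, using a made-up payload
# key 'xs' (illustrative only, not one of the real operations). The frame is:
# 4-byte big-endian header size, then the JSON header, then the raw float32 bytes.
def _demo_frame_layout():
    xs = np.array([1.0, 2.0], dtype=np.float32)
    header = {
        "operation": OPERATION_INTERPOLATE_HYPERBOLIC_PARABOLIC,
        "data": {"xs": None},
        "data_pointers": {"xs": {"offset": 0, "length": 8, "dtype": "float32"}},
    }
    header_bytes = json.dumps(header).encode("utf-8")
    frame = struct.pack("!I", len(header_bytes)) + header_bytes + xs.tobytes()
    return frame  # len(frame) == 4 + len(header_bytes) + 8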
def receive_data(conn):
"""
Receives structured data from a socket.
Args:
conn (socket.socket): The socket to receive data from.
Returns:
tuple: A tuple containing the JSON header (dict) and the binary
payload (bytes), or (None, None) if the connection closes.
"""
    try:
        # First, receive the 4-byte size of the JSON header; loop because
        # recv() may return fewer bytes than requested.
        header_size_bytes = b''
        while len(header_size_bytes) < 4:
            chunk = conn.recv(4 - len(header_size_bytes))
            if not chunk:
                return None, None
            header_size_bytes += chunk
        header_size = struct.unpack('!I', header_size_bytes)[0]
# Then, receive the JSON header itself
json_header_bytes = b''
while len(json_header_bytes) < header_size:
chunk = conn.recv(header_size - len(json_header_bytes))
if not chunk:
return None, None
json_header_bytes += chunk
json_header = json.loads(json_header_bytes.decode('utf-8'))
# Calculate total binary data length from pointers
total_data_length = sum(ptr['length'] for ptr in json_header.get('data_pointers', {}).values())
# Finally, receive the binary data
data_bytes = b''
while len(data_bytes) < total_data_length:
chunk = conn.recv(total_data_length - len(data_bytes))
if not chunk:
return None, None
data_bytes += chunk
return json_header, data_bytes
except (socket.error, struct.error, json.JSONDecodeError) as e:
print(f"Error receiving data: {e}")
return None, None
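
# Quick self-test of the framing: one process, both ends of a local socket pair.
# A sketch only (the 'values' key is made up); socket.socketpair() exists on
# POSIX and, since Python 3.5, on Windows too.
def _demo_roundtrip():
    a, b = socket.socketpair()
    values = np.arange(4, dtype=np.float32)
    send_data(a, OPERATION_EIGENVALUE_PACKING, {'values': values})
    header, payload = receive_data(b)
    ptr = header['data_pointers']['values']
    echoed = np.frombuffer(payload, dtype=np.float32,
                           count=ptr['length'] // 4, offset=ptr['offset'])
    assert np.array_equal(echoed, values)
    a.close()
    b.close()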
# --- New Functions for Eigenvalue Packing ---
def pack_matrix_to_eigen(matrix: np.ndarray):
"""
Performs eigendecomposition and packs the eigenvalues and eigenvectors.
Args:
matrix (np.ndarray): The square matrix to be packed.
Returns:
tuple: A tuple containing the JSON header and a binary payload.
"""
if matrix.shape[0] != matrix.shape[1]:
raise ValueError("Matrix must be square for eigendecomposition.")
    # Perform eigendecomposition. np.linalg.eig can return complex eigenpairs
    # for non-symmetric input; this protocol transmits float32, so only the
    # real parts are kept (callers should send matrices with a real spectrum).
    eigenvalues, eigenvectors = np.linalg.eig(matrix)
    eigenvalues = np.real(eigenvalues)
    eigenvectors = np.real(eigenvectors)
# The client will use this to reconstruct the matrix
json_header = {
'operation': OPERATION_EIGENVALUE_PACKING,
        'data_pointers': {
            'eigenvalues': {'offset': 0, 'length': eigenvalues.size * 4, 'dtype': 'float32'},
            'eigenvectors': {'offset': eigenvalues.size * 4, 'length': eigenvectors.size * 4, 'dtype': 'float32'},
        },
'metadata': {
'matrix_shape': matrix.shape
}
}
# Concatenate the binary data
binary_payload = eigenvalues.astype(np.float32).tobytes() + eigenvectors.astype(np.float32).tobytes()
return json_header, binary_payload
def reconstruct_matrix(data_bytes, data_pointers, matrix_shape):
"""
Reconstructs the original matrix from packed eigenvalues and eigenvectors.
Args:
data_bytes (bytes): The binary payload.
data_pointers (dict): Dictionary with offsets and lengths.
matrix_shape (tuple): The original shape of the matrix.
Returns:
np.ndarray: The reconstructed matrix.
"""
eigenvalues_info = data_pointers['eigenvalues']
eigenvectors_info = data_pointers['eigenvectors']
# Unpack the binary data
eigenvalues = np.frombuffer(
data_bytes,
offset=eigenvalues_info['offset'],
count=eigenvalues_info['length'] // 4,
dtype=np.float32
)
eigenvectors = np.frombuffer(
data_bytes,
offset=eigenvectors_info['offset'],
count=eigenvectors_info['length'] // 4,
dtype=np.float32
).reshape(matrix_shape)
# Reconstruct the matrix: A = P * D * P_inverse
# Where P is the matrix of eigenvectors and D is the diagonal matrix of eigenvalues
D = np.diag(eigenvalues)
P = eigenvectors
P_inv = np.linalg.inv(P)
reconstructed_matrix = P @ D @ P_inv
    # All inputs here are real float32, so the product is real; np.real() is
    # kept as a defensive no-op in case complex data ever reaches this point.
    return np.real(reconstructed_matrix)
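
# Round-trip sketch for the two functions above: pack a small symmetric matrix
# (symmetric input guarantees real, well-conditioned eigenpairs, which the
# float32 transport requires) and rebuild it from the payload.
def _demo_eigen_roundtrip():
    A = np.array([[2.0, 1.0],
                  [1.0, 3.0]], dtype=np.float32)
    header, payload = pack_matrix_to_eigen(A)
    B = reconstruct_matrix(payload, header['data_pointers'],
                           header['metadata']['matrix_shape'])
    assert np.allclose(A, B, atol=1e-4)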
# adi_comm_client.py
# A sample client that sends a matrix to the server using eigenvalue packing.
import socket
import numpy as np
import json
import struct
# Import the shared protocol module for interoperability
from adi_comm_protocol import (
    send_data,
    receive_data,
    pack_matrix_to_eigen,
    OPERATION_EIGENVALUE_PACKING
)
def run_client():
"""
Connects to the server, packs a sample matrix, sends it, and receives the result.
"""
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 12345
    # Create a sample square matrix to be packed. A symmetric matrix is used so
    # the eigenpairs are guaranteed real and well-conditioned, which the
    # float32 transport requires.
    sample_matrix = np.array([
        [4.0, 1.0, 2.0, 0.0],
        [1.0, 3.0, 0.0, 1.0],
        [2.0, 0.0, 5.0, 1.0],
        [0.0, 1.0, 1.0, 2.0]
    ], dtype=np.float32)
print("Original Matrix:")
print(sample_matrix)
# Pack the matrix using the new protocol function
packed_header, packed_payload = pack_matrix_to_eigen(sample_matrix)
# Connect to the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
try:
client_socket.connect((SERVER_HOST, SERVER_PORT))
print("Connected to server.")
            # Send the packed eigen-data. Slice the payload with the pointer
            # table so each array is re-wrapped correctly, and forward the
            # matrix shape so the server can reconstruct.
            ptrs = packed_header['data_pointers']
            send_data(client_socket, packed_header['operation'], {
                'eigenvalues': np.frombuffer(packed_payload, dtype=np.float32,
                                             count=ptrs['eigenvalues']['length'] // 4,
                                             offset=ptrs['eigenvalues']['offset']),
                'eigenvectors': np.frombuffer(packed_payload, dtype=np.float32,
                                              count=ptrs['eigenvectors']['length'] // 4,
                                              offset=ptrs['eigenvectors']['offset']),
            }, metadata=packed_header['metadata'])
# Receive the response from the server
response_header, response_bytes = receive_data(client_socket)
            if response_header and 'result' in response_header['data']:
                result_from_server = np.frombuffer(response_bytes, dtype=np.float32)
                print(f"\nReceived result (trace of the reconstructed matrix): {result_from_server}")
            elif response_header and 'error' in response_header['data']:
                print(f"Server reported an error: {response_header['data']['error']}")
            else:
                print("Failed to receive a valid response.")
except ConnectionRefusedError:
print(f"Connection refused. Is the server running on {SERVER_HOST}:{SERVER_PORT}?")
except Exception as e:
print(f"An error occurred: {e}")
if __name__ == "__main__":
run_client()
# adi_comm_server.py
# This new, unified server handles all FPU operations from both
# n-math.py and e-stream.py using a single, efficient process pool.
import socket
import struct
import numpy as np
import json
import threading
import multiprocessing
import traceback
# Import the shared protocol module for interoperability
from adi_comm_protocol import (
    receive_data,
    send_data,
    OPERATION_INTERPOLATE_HYPERBOLIC_PARABOLIC,
    OPERATION_INTERPOLATE_ARCSECANT_STREAM,
    OPERATION_EIGENVALUE_PACKING,
    reconstruct_matrix  # unpacks the eigen-packed payload
)
# --- FPU Operations (Consolidated from n-math.py and e-stream.py) ---
# These functions are now standalone and can be executed by worker processes.
def hyperbolic_parabolic_interpolation_nd_revised(data_bytes, data_pointers):
"""
Performs hyperbolic-parabolic interpolation on n-dimensional data.
"""
all_fy_data = []
all_fx_data = []
x_interp = None
# Unpack the binary data based on the JSON data_pointers
for key, ptr in data_pointers.items():
start = ptr['offset']
end = start + ptr['length']
unpacked_data = np.frombuffer(data_bytes[start:end], dtype=np.float32)
if key.startswith('fx'):
all_fx_data.append(unpacked_data)
elif key.startswith('fy'):
all_fy_data.append(unpacked_data)
elif key == 'x_interp':
x_interp = unpacked_data
    if len(all_fx_data) != len(all_fy_data) or x_interp is None:
        raise ValueError("Invalid data for interpolation.")
    all_interp_y = []
for fx, fy in zip(all_fx_data, all_fy_data):
if len(fx) != len(fy) or len(fx) < 3:
raise ValueError("X and Y data must have equal length and at least three points.")
interp_y = []
for x in x_interp:
distances = np.abs(fx - x)
nearest_indices = np.argsort(distances)[:3]
x1, x2, x3 = fx[nearest_indices]
y1, y2, y3 = fy[nearest_indices]
            # Simplified interpolation logic for demonstration
            if x1 == x2 or x2 == x3 or x1 == x3:
                raise RuntimeError("Duplicate x-values among the nearest points; cannot fit a parabola.")
# Parabolic interpolation (y = ax^2 + bx + c)
# Using matrix inversion for a more robust solution
A = np.array([
[x1**2, x1, 1],
[x2**2, x2, 1],
[x3**2, x3, 1]
])
b = np.array([y1, y2, y3])
try:
a, b_lin, c = np.linalg.solve(A, b)
y_interp = a * x**2 + b_lin * x + c
except np.linalg.LinAlgError:
raise RuntimeError("Could not solve parabolic interpolation matrix.")
interp_y.append(y_interp)
all_interp_y.append(np.array(interp_y))
return np.concatenate(all_interp_y)
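
# Worked example of the parabola fit used above: solving the 3x3 Vandermonde
# system for the points (0, 1), (1, 0), (2, 1) gives y = x^2 - 2x + 1, i.e.
# a = 1, b = -2, c = 1. A standalone sketch, independent of the server machinery.
def _demo_parabola_fit():
    A = np.array([[0.0, 0.0, 1.0],
                  [1.0, 1.0, 1.0],
                  [4.0, 2.0, 1.0]])  # rows are [x**2, x, 1] for x = 0, 1, 2
    b = np.array([1.0, 0.0, 1.0])
    a_coef, b_coef, c_coef = np.linalg.solve(A, b)
    assert np.allclose([a_coef, b_coef, c_coef], [1.0, -2.0, 1.0])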
def pseudo_interpolate_arcsecant_stream(data_bytes, data_pointers):
"""
Pseudo-interpolates a chunk of interpolation points using pre-existing data.
This now works as a single, callable function within the service.
"""
    def _slice(name):
        ptr = data_pointers[name]
        return np.frombuffer(
            data_bytes[ptr['offset'] : ptr['offset'] + ptr['length']],
            dtype=np.float32
        )
    x_data = _slice('x_data')
    y_data = _slice('y_data')
    x_interp_chunk = _slice('x_interp_chunk')
def _calculate_arcsecant(val):
if np.abs(val) < 1:
return np.nan
return np.arccos(1 / val)
interpolated_y = []
for x in x_interp_chunk:
# Simplified logic: find nearest point and apply a transformation
nearest_idx = np.argmin(np.abs(x_data - x))
y_val = y_data[nearest_idx]
interpolated_y.append(_calculate_arcsecant(y_val))
return np.array(interpolated_y, dtype=np.float32)
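
# Numeric sanity check for the arcsecant transform above: arcsec(2) = arccos(1/2)
# = pi/3 (about 1.0472), while inputs with |x| < 1 fall outside the domain.
def _demo_arcsecant():
    assert np.isclose(np.arccos(1 / 2.0), np.pi / 3)
    assert np.isnan(np.arccos(2.0))  # outside [-1, 1] -> NaN (with a RuntimeWarning)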
# --- Server and Multiprocessing Logic ---
def worker_process(request_data):
"""
A worker function that is executed by a process in the pool.
It takes a request dictionary and dispatches the correct FPU operation.
"""
op_type = request_data["operation"]
data_bytes = request_data["data_bytes"]
data_pointers = request_data["data_pointers"]
if op_type == OPERATION_INTERPOLATE_HYPERBOLIC_PARABOLIC:
result = hyperbolic_parabolic_interpolation_nd_revised(data_bytes, data_pointers)
elif op_type == OPERATION_INTERPOLATE_ARCSECANT_STREAM:
result = pseudo_interpolate_arcsecant_stream(data_bytes, data_pointers)
elif op_type == OPERATION_EIGENVALUE_PACKING:
# Reconstruct the matrix from the packed data
matrix_shape = request_data["metadata"]["matrix_shape"]
reconstructed_matrix = reconstruct_matrix(data_bytes, data_pointers, matrix_shape)
# Perform a sample calculation on the reconstructed matrix
trace_value = np.trace(reconstructed_matrix)
print(f"Reconstructed Matrix from packed data:\n{reconstructed_matrix}")
print(f"Calculated Trace: {trace_value}")
result = np.array([trace_value], dtype=np.float32)
else:
raise ValueError(f"Unknown operation type: {op_type}")
return result
def handle_client(client_socket, addr, pool):
"""
Handles a single client connection.
Parses the request and submits the FPU task to the multiprocessing pool.
"""
print(f"Accepted connection from {addr}")
try:
json_header, data_bytes = receive_data(client_socket)
if json_header is None:
print(f"Client {addr} disconnected unexpectedly.")
return
print(f"Received request from {addr} for operation {json_header['operation']}")
request_data = {
"operation": json_header["operation"],
"data_bytes": data_bytes,
"data_pointers": json_header["data_pointers"]
}
# Add metadata if present
if 'metadata' in json_header:
request_data['metadata'] = json_header['metadata']
# Submit the FPU task to the process pool
result_async = pool.apply_async(worker_process, (request_data,))
result = result_async.get() # Block until the result is available
# Send the result back to the client
send_data(client_socket, json_header["operation"], {"result": result})
print(f"Sent result back to {addr}.")
    except (ValueError, RuntimeError) as e:
        print(f"ValueError or RuntimeError on server from {addr}: {e}")
        # Report the error through the framed protocol so the client can parse
        # it, rather than as a raw length-prefixed string.
        send_data(client_socket, json_header["operation"], {"error": str(e)})
except ConnectionResetError:
print(f"Client {addr} forcibly closed the connection.")
except Exception as e:
print(f"An unexpected error occurred in handle_client for {addr}: {e}")
traceback.print_exc()
finally:
client_socket.close()
print(f"Connection with client {addr} closed.")
def start_server(host, port):
"""
Starts the main server, listens for connections, and manages the process pool.
"""
# Create a process pool with a number of workers equal to CPU cores
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((host, port))
server_socket.listen(5)
print(f"Server listening on {host}:{port}")
try:
while True:
client_socket, addr = server_socket.accept()
# Handle each client in a separate thread to not block the main loop
client_thread = threading.Thread(target=handle_client, args=(client_socket, addr, pool))
client_thread.start()
except KeyboardInterrupt:
print("\nServer shutting down...")
finally:
server_socket.close()
pool.close()
pool.join()
print("Server shutdown complete.")
if __name__ == "__main__":
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 12345
start_server(SERVER_HOST, SERVER_PORT)
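
# Usage sketch (two terminals, assuming the protocol module is saved as
# adi_comm_protocol.py alongside the client and server files):
#
#   python adi_comm_server.py    # terminal 1: listens on 127.0.0.1:12345
#   python adi_comm_client.py    # terminal 2: packs, sends, prints the result
#
# The client should print a value close to the trace of sample_matrix (up to
# float32 rounding), since trace(P * D * P^-1) = trace(D) = sum of eigenvalues.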
-- VHDL Architecture for a custom Eigenvalue Packing Bridge
-- This design is refit to the Python client's JSON-based protocol.
-- This version includes a conceptual framework for virtual memory addressing.
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
----------------------------------------------------------------------
-- Eigen_Bridge Entity Declaration
-- This defines the external ports of our FPGA module.
----------------------------------------------------------------------
entity Eigen_Bridge is
port (
-- Clock and Reset signals
clk_i : in std_logic; -- Main clock input
        rst_i : in std_logic; -- Asynchronous reset input (active high)
-- External Parallel Bus Interface (Data, Write Enable, Ready)
data_in_i : in std_logic_vector(31 downto 0); -- 32-bit data input from host
write_enable_i : in std_logic; -- Write enable strobe from host
read_enable_i : in std_logic; -- Read enable strobe from host
ready_o : out std_logic; -- Ready signal to host
-- Data output back to host
data_out_o : out std_logic_vector(31 downto 0); -- 32-bit data output to host
-- Status and Control
error_o : out std_logic; -- Error flag to host
-- Conceptual Virtual Memory Interface
virtual_address_i : in std_logic_vector(31 downto 0); -- Conceptual virtual address
physical_address_o: out std_logic_vector(31 downto 0); -- Conceptual physical address
page_fault_o : out std_logic -- Conceptual page fault signal
);
end Eigen_Bridge;
----------------------------------------------------------------------
-- Eigen_Bridge Architecture
-- This is the behavioral description of the bridge's logic, refit
-- to the Python client's protocol for eigenvalue packing.
----------------------------------------------------------------------
architecture Behavioral of Eigen_Bridge is
-- Internal component for the eigenvalue calculation core
component Eigenvalue_Core is
port (
-- Clock and Reset
clk_i : in std_logic;
rst_i : in std_logic;
-- Control signals for data handshake
start_i : in std_logic; -- Signal to start a new calculation
done_o : out std_logic; -- Indicates when the calculation is complete
busy_o : out std_logic; -- Indicates the core is busy
-- Pointers to input data buffers
eigenvalues_ptr_i: in std_logic_vector(31 downto 0);
eigenvectors_ptr_i: in std_logic_vector(31 downto 0);
matrix_size_i : in std_logic_vector(31 downto 0);
-- Output result
result_o : out std_logic_vector(31 downto 0); -- Result of the calculation
result_len_o : out std_logic_vector(31 downto 0)
);
end component;
-- State machine for controlling the data flow
type state_type is (IDLE,
RECEIVE_JSON_HEADER_SIZE,
RECEIVE_JSON_HEADER,
RECEIVE_EIGENVALUES,
RECEIVE_EIGENVECTORS,
                        VIRTUAL_TO_PHYSICAL_TRANSLATION, -- Conceptual MMU hook; not entered by the main flow in this sketch
CALCULATE,
SEND_RESULT_LEN,
SEND_RESULT_DATA,
ERROR);
signal state : state_type := IDLE;
-- Internal signals for communication between bridge and core
signal start_calc_s : std_logic := '0';
signal core_done_s : std_logic := '0';
signal core_busy_s : std_logic := '0';
signal core_result_s : std_logic_vector(31 downto 0);
signal core_result_len_s: std_logic_vector(31 downto 0);
-- Conceptual Dual-Port RAM for data buffering
type ram_type is array (0 to 65535) of std_logic_vector(31 downto 0);
signal data_ram : ram_type;
signal write_address: integer range 0 to 65535 := 0;
signal read_address: integer range 0 to 65535 := 0;
-- Signals to hold data from the JSON header
signal json_header_size_s : std_logic_vector(31 downto 0);
signal eigenvalues_len_s : std_logic_vector(31 downto 0);
signal eigenvectors_len_s : std_logic_vector(31 downto 0);
signal matrix_size_s : std_logic_vector(31 downto 0);
signal data_offset_s : std_logic_vector(31 downto 0);
-- Operation code from the Python protocol
signal op_code_expected : std_logic_vector(31 downto 0) := x"00000003";
-- Counters for data reception and transmission
signal write_counter : integer range 0 to 65536 := 0;
signal read_counter : integer range 0 to 65536 := 0;
    -- Conceptual virtual memory signals (the MMU process reads
    -- virtual_address_i directly)
    signal physical_address_s : std_logic_vector(31 downto 0);
    signal page_fault_s : std_logic;
begin
-- Instantiate the Eigenvalue_Core component
Core_Instance: Eigenvalue_Core
port map (
clk_i => clk_i,
rst_i => rst_i,
start_i => start_calc_s,
done_o => core_done_s,
busy_o => core_busy_s,
eigenvalues_ptr_i=> (others => '0'), -- Conceptual pointers
eigenvectors_ptr_i=> (others => '0'), -- to the data_ram
matrix_size_i => matrix_size_s,
result_o => core_result_s,
result_len_o => core_result_len_s
);
-- Conceptual Memory Management Unit (MMU) process
-- This process conceptually translates virtual addresses to physical addresses.
-- In a real design, this would be a much more complex block with page tables.
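    -- Example of the direct mapping implemented here (illustrative values):
    --   virtual x"00001234" -> physical x"00001234", page_fault_o = '0'
    --   virtual x"00010000" -> page_fault_o = '1' (beyond the 64K-word data_ram)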
process(clk_i, rst_i)
begin
if rst_i = '1' then
physical_address_s <= (others => '0');
page_fault_s <= '0';
elsif rising_edge(clk_i) then
-- Check if a virtual address needs translation (e.g., in a new state)
if state = VIRTUAL_TO_PHYSICAL_TRANSLATION then
                -- For this conceptual model, pass the virtual address straight
                -- through as the physical address: a direct mapping, no page table.
                physical_address_s <= virtual_address_i;
                -- Conceptual page fault: flag any address beyond the physical
                -- RAM range (a real MMU would consult page tables instead).
                if unsigned(virtual_address_i) > 65535 then
                    page_fault_s <= '1';
                else
                    page_fault_s <= '0';
                end if;
else
page_fault_s <= '0';
end if;
end if;
end process;
-- Connect conceptual signals to ports
physical_address_o <= physical_address_s;
page_fault_o <= page_fault_s;
-- Main State Machine Process
process(clk_i, rst_i)
begin
if rst_i = '1' then
state <= IDLE;
write_counter <= 0;
read_counter <= 0;
write_address <= 0;
read_address <= 0;
start_calc_s <= '0';
ready_o <= '0';
data_out_o <= (others => '0');
error_o <= '0';
elsif rising_edge(clk_i) then
-- Default assignments
start_calc_s <= '0';
ready_o <= '0';
case state is
            -- State 1: Awaiting a new request. The first write strobe only arms
            -- the receiver; the size word itself is captured in the next state.
            when IDLE =>
                ready_o <= '1';
                error_o <= '0';
                write_address <= 0;
                if write_enable_i = '1' then
                    state <= RECEIVE_JSON_HEADER_SIZE;
                end if;
-- State 2: Receiving the JSON header size (4 bytes)
when RECEIVE_JSON_HEADER_SIZE =>
ready_o <= '1';
if write_enable_i = '1' then
json_header_size_s <= data_in_i;
write_counter <= 0;
state <= RECEIVE_JSON_HEADER;
end if;
-- State 3: Receiving the fixed-size JSON header (conceptualized)
when RECEIVE_JSON_HEADER =>
ready_o <= '1';
if write_enable_i = '1' then
-- Conceptual parsing of a fixed-size header
if write_counter = 0 then
-- First word: check for operation code
if data_in_i = op_code_expected then
-- Do nothing, proceed to next word
else
state <= ERROR;
end if;
elsif write_counter = 1 then
eigenvalues_len_s <= data_in_i;
elsif write_counter = 2 then
eigenvectors_len_s <= data_in_i;
elsif write_counter = 3 then
matrix_size_s <= data_in_i;
end if;
write_counter <= write_counter + 1;
-- Assuming a fixed header size of 4 words (16 bytes)
if write_counter = 3 then
data_offset_s <= x"00000004"; -- The binary payload starts after the header
write_address <= 4;
write_counter <= 0;
state <= RECEIVE_EIGENVALUES;
end if;
end if;
-- State 4: Receiving the eigenvalues binary data
when RECEIVE_EIGENVALUES =>
ready_o <= '1';
if write_enable_i = '1' then
data_ram(write_address) <= data_in_i;
write_address <= write_address + 1;
write_counter <= write_counter + 1;
                    -- write_counter holds the pre-increment count, so the last
                    -- of the len/4 words arrives when it equals (len/4) - 1
                    if write_counter = to_integer(unsigned(eigenvalues_len_s)) / 4 - 1 then
                        write_counter <= 0;
                        state <= RECEIVE_EIGENVECTORS;
                    end if;
end if;
-- State 5: Receiving the eigenvectors binary data
when RECEIVE_EIGENVECTORS =>
ready_o <= '1';
if write_enable_i = '1' then
data_ram(write_address) <= data_in_i;
write_address <= write_address + 1;
write_counter <= write_counter + 1;
                    if write_counter = to_integer(unsigned(eigenvectors_len_s)) / 4 - 1 then
                        state <= CALCULATE;
                    end if;
end if;
-- State 6: Triggering the calculation core
when CALCULATE =>
-- In a real system, the core would read from the physical addresses
-- mapped by the conceptual MMU.
start_calc_s <= '1'; -- Signal to the core to start
ready_o <= '0';
read_address <= 0; -- Reset read address for output
if core_done_s = '1' then
state <= SEND_RESULT_LEN;
end if;
-- State 7: Sending the result length back to the host
when SEND_RESULT_LEN =>
ready_o <= '1';
data_out_o <= core_result_len_s;
if read_enable_i = '1' then
state <= SEND_RESULT_DATA;
read_counter <= 0;
end if;
-- State 8: Sending the result data back to the host
when SEND_RESULT_DATA =>
ready_o <= '1';
data_out_o <= core_result_s;
if read_enable_i = '1' then
read_counter <= read_counter + 1;
if read_counter = to_integer(unsigned(core_result_len_s)) - 1 then
state <= IDLE;
end if;
end if;
            -- State 9: Error state. Exited only via reset, which the
            -- asynchronous reset branch at the top of this process handles.
            when ERROR =>
                error_o <= '1';
                ready_o <= '0';
end case;
end if;
end process;
end Behavioral;
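
# host_bridge_driver.py (conceptual sketch)
# Host-side view of the word sequence the Eigen_Bridge FSM above expects: one
# arming strobe is absorbed in IDLE, then the header size word, then the fixed
# 4-word header (op code, eigenvalues length in bytes, eigenvectors length in
# bytes, matrix size), then the two float32 payloads. write_word() is a
# hypothetical stand-in for the real parallel-bus write primitive.
import struct
import numpy as np

def send_to_bridge(write_word, eigenvalues, eigenvectors, n):
    write_word(0)                     # arming strobe consumed by IDLE
    write_word(16)                    # header size in bytes (4 words)
    write_word(3)                     # op code: OPERATION_EIGENVALUE_PACKING
    write_word(eigenvalues.size * 4)  # eigenvalues length in bytes
    write_word(eigenvectors.size * 4) # eigenvectors length in bytes
    write_word(n)                     # matrix dimension
    for value in np.concatenate([eigenvalues, eigenvectors.flatten()]).astype(np.float32):
        # reinterpret each float32 as a raw 32-bit bus word
        write_word(struct.unpack('!I', struct.pack('!f', float(value)))[0])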