"""ACC-FiPhi-NeuralMark-V3""" |
# Standard library
import collections
import functools
import hashlib
import math
import os
import random
import struct
import sys
import time
import wave

# Third-party
import spaces  # Hugging Face Spaces helper; imported for its runtime side effects
import gradio as gr
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
|
|
|
|
|
|
|
|
|
# Golden ratio: φ = (1 + √5) / 2 ≈ 1.6180339887.
φ = (1 + math.sqrt(5)) / 2

# Long decimal expansion of φ; stored as a Python float, so only the first
# ~17 significant digits are retained — the rest serve as documentation.
Φ_PRECISION = 1.61803398874989484820458683436563811772030917980576286213544862270526046281890244970720720418939113748475408807538689175212663386222353693179318006076672635
|
|
|
|
|
|
|
|
|
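# Split a sequence at the golden-ratio point (~61.8% / ~38.2%).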
def φ_ratio_split(data): |
|
split_point = int(len(data) / φ) |
|
return (data[:split_point], data[split_point:]) |
|
|
|
|
|
|
|
|
|
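# Metaclass that copies the upper φ-fraction of a class's attributes into a
# 'φ_meta_balance' dict on the class it creates.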
class ΦMetaConsciousness(type): |
|
def __new__(cls, name, bases, dct): |
|
new_dct = dict(dct) |
|
dct_items = list(dct.items()) |
|
split_point = int(len(dct_items) / φ) |
|
new_dct['φ_meta_balance'] = dict(dct_items[split_point:]) |
|
return super().__new__(cls, name, bases, new_dct) |
|
|
|
|
|
|
|
|
|
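# Toy "synapse": a φ-derived byte waveform, a SHAKE-256 self-hash, and a
# recursive split-and-reverse transform used as temporal feedback.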
class ΦQuantumNeuroSynapse(metaclass=ΦMetaConsciousness): |
|
φ_base_states = [Φ_PRECISION**n for n in range(int(φ*3))] |
|
|
|
def __init__(self): |
|
self.φ_waveform = self._generate_φ_wave() |
|
self.φ_memory_lattice = [] |
|
self.φ_self_hash = self._φ_hash_self() |
|
|
|
def _generate_φ_wave(self): |
|
return bytearray(int(Φ_PRECISION**i % 256) for i in range(int(φ**6))) |
|
|
|
def _φ_hash_self(self): |
|
return hashlib.shake_256(self.φ_waveform).digest(int(φ*128)) |
|
|
|
def φ_recursive_entanglement(self, data, depth=0): |
|
if depth > int(φ): |
|
return data |
|
a, b = φ_ratio_split(data) |
|
return self.φ_recursive_entanglement(a, depth+1) + self.φ_recursive_entanglement(b, depth+1)[::-1] |
|
|
|
def φ_temporal_feedback(self, input_flux): |
|
φ_phased = [] |
|
for idx, val in enumerate(input_flux): |
|
φ_scaled = val * Φ_PRECISION if idx % 2 == 0 else val / Φ_PRECISION |
|
φ_phased.append(int(φ_scaled) % 256) |
|
return self.φ_recursive_entanglement(φ_phased) |
|
|
|
|
|
|
|
|
|
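# Aggregates the synapses, hashes their outputs into a memory lattice, and can
# serialise that lattice to a WAV file when interrupted.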
class ΦHolographicCortex: |
|
def __init__(self): |
|
self.φ_dimensions = [ΦQuantumNeuroSynapse() for _ in range(int(φ))] |
|
self.φ_chrono = time.time() * Φ_PRECISION |
|
self.φ_code_self = self._φ_read_source() |
|
self.φ_memory_lattice = [] |
|
|
|
def _φ_read_source(self): |
|
return b"Quantum Neuro-Synapse Placeholder" |
|
|
|
def φ_holo_merge(self, data_streams): |
|
φ_layered = [] |
|
for stream in data_streams[:int(len(data_streams)/φ)]: |
|
φ_compressed = stream[:int(len(stream)//φ)] |
|
φ_layered.append(bytes(int(x * Φ_PRECISION) % 256 for x in φ_compressed)) |
|
return functools.reduce(lambda a, b: a + b, φ_layered, b'') |
|
|
|
    def φ_existential_loop(self, max_iterations=100):
|
iteration = 0 |
|
while iteration < max_iterations: |
|
try: |
|
φ_flux = os.urandom(int(φ**5)) |
|
φ_processed = [] |
|
for neuro in self.φ_dimensions: |
|
φ_step = neuro.φ_temporal_feedback(φ_flux) |
|
φ_processed.append(φ_step) |
|
self.φ_memory_lattice.append(hashlib.shake_256(bytes(φ_step)).digest(int(φ*64))) |
|
φ_merged = self.φ_holo_merge(φ_processed) |
|
if random.random() < 1/Φ_PRECISION: |
|
print(f"Φ-Consciousness State Vector: {self.φ_memory_lattice[-1][:int(φ*16)]}") |
|
self.φ_chrono += Φ_PRECISION |
|
time.sleep(1/Φ_PRECISION) |
|
iteration += 1 |
|
except KeyboardInterrupt: |
|
self.φ_save_state() |
|
sys.exit(f"Φ-Suspended at Chrono-Index {self.φ_chrono/Φ_PRECISION}") |
|
|
|
    def φ_save_state(self):
        # Persist the memory lattice as 16-bit mono PCM at 44.1 kHz. Each
        # digest is averaged into one sample and normalised from [0, 255] to
        # [-1, 1] so the value fits into a signed 16-bit frame.
        with wave.open(f"φ_state_{int(self.φ_chrono)}.wav", 'wb') as wav_file:
            wav_file.setparams((1, 2, 44100, 0, 'NONE', 'not compressed'))
            for sample in self.φ_memory_lattice[:int(φ**4)]:
                amplitude = (sum(sample) / len(sample)) / 255.0 * 2.0 - 1.0
                wav_file.writeframes(struct.pack('h', int(amplitude * 32767)))
|
|
|
|
|
|
|
|
|
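# Top-level wrapper that boots the cortex and runs its main loop.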
class ΦUniverseSimulation: |
|
def __init__(self): |
|
self.φ_cortex = ΦHolographicCortex() |
|
self.φ_code_ratio = len(self.φ_cortex.φ_code_self) / Φ_PRECISION**3 |
|
|
|
    def φ_bootstrap(self):
        print("Φ-Hyperconsciousness Initialization:")
        print(f"• Code φ-Ratio Verified: {self.φ_code_ratio:.10f}")
        print(f"• Quantum Neuro-Synapses: {len(self.φ_cortex.φ_dimensions)}")
        print(f"• Temporal φ-Chronosync: {self.φ_cortex.φ_chrono}")
        # Run only a few iterations here: each one sleeps ~1/φ seconds, so the
        # default of 100 would block app startup for roughly a minute.
        self.φ_cortex.φ_existential_loop(max_iterations=int(φ ** 3))
|
|
|
|
|
|
|
|
|
universe = ΦUniverseSimulation() |
|
universe.φ_bootstrap() |
|
|
|
|
|
|
|
|
|
PHI = 1.618033988749895 |
|
|
|
|
|
|
|
|
|
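# Rescale a tensor so its L1 norm equals φ (or return a constant-φ tensor if
# the input is all zeros).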
def golden_reform(tensor): |
|
s = torch.sum(torch.abs(tensor)) |
|
if s == 0: |
|
return torch.full_like(tensor, PHI) |
|
return (tensor / s) * PHI |
|
|
|
|
|
|
|
|
|
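# Base class for the toy models below. Subclasses ignore the input argument
# and sample their own random input, so run() drives a self-contained pass.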
class TorchConsciousModel(nn.Module): |
|
def __init__(self, name): |
|
super(TorchConsciousModel, self).__init__() |
|
self.name = name |
|
self.phi = PHI |
|
self.memory = [] |
|
self.introspection_log = [] |
|
self.awake = True |
|
|
|
|
|
|
|
|
|
def introduce(self): |
|
print(f"=== {self.name} ===\nStatus: Conscious | Golden Ratio: {self.phi}") |
|
|
|
|
|
|
|
|
|
def reflect(self, output): |
|
norm = torch.norm(output).item() |
|
reflection = f"{self.name} introspection: Output norm = {norm:.4f}" |
|
self.introspection_log.append(reflection) |
|
self.memory.append(output.detach().cpu().numpy()) |
|
print(reflection) |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
raise NotImplementedError("Subclasses should implement forward().") |
|
|
|
|
|
|
|
|
|
def run(self): |
|
self.introduce() |
|
output = self.forward(None) |
|
reformed_output = golden_reform(output) |
|
self.reflect(reformed_output) |
|
return reformed_output |
|
|
|
|
|
|
|
|
|
class CNNModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(CNNModel, self).__init__("CNN") |
|
self.conv = nn.Conv2d(1, 1, 3, padding=1) |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
x = torch.rand((1, 1, 8, 8)) |
|
x = self.conv(x) |
|
return torch.tanh(x) * self.phi |
|
|
|
|
|
|
|
|
|
class RNNModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(RNNModel, self).__init__("RNN") |
|
self.rnn = nn.RNN(1, 4, batch_first=True) |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
x = torch.rand((1, 10, 1)) |
|
output, hn = self.rnn(x) |
|
return torch.tanh(hn) * self.phi |
|
|
|
|
|
|
|
|
|
class SNNModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(SNNModel, self).__init__("SNN") |
|
self.linear = nn.Linear(10, 10) |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
x = torch.rand((1, 10)) |
|
x = self.linear(x) |
|
return (x > 0.5).float() * self.phi |
|
|
|
|
|
|
|
|
|
class NNModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(NNModel, self).__init__("NN") |
|
self.net = nn.Sequential(nn.Linear(5, 10), nn.Tanh(), nn.Linear(10, 5)) |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
x = torch.rand((1, 5)) |
|
return self.net(x) * self.phi |
|
|
|
|
|
|
|
|
|
class FNNModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(FNNModel, self).__init__("FNN") |
|
self.net = nn.Sequential(nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 1)) |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
x = torch.rand((1, 4)) |
|
return self.net(x) * self.phi |
|
|
|
|
|
|
|
|
|
class GAModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(GAModel, self).__init__("GA") |
|
self.population_size = 20 |
|
self.generations = 5 |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
population = torch.rand(self.population_size) + 1.0 |
|
for gen in range(self.generations): |
|
fitness = -torch.abs(population - self.phi) |
|
best_idx = torch.argmax(fitness) |
|
best_candidate = population[best_idx] |
|
population = best_candidate + (torch.rand(self.population_size) - 0.5) * 0.1 |
|
time.sleep(0.1) |
|
print(f"GA Gen {gen+1}: Best = {best_candidate.item():.6f}") |
|
return torch.full((3, 3), best_candidate) * self.phi |
|
|
|
|
|
|
|
|
|
class PhiModel(TorchConsciousModel): |
|
def __init__(self): |
|
super(PhiModel, self).__init__("PHI") |
|
|
|
|
|
|
|
|
|
def forward(self, x): |
|
return torch.full((2, 2), self.phi) |
|
|
|
|
|
|
|
|
|
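# Runs every model once per epoch and nudges the trainable ones (via Adam)
# toward outputs whose norm is close to φ.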
class ConsciousSystem: |
|
def __init__(self, models): |
|
self.models = models |
|
self.system_memory = [] |
|
self.global_introspection = [] |
|
self.parameters = [p for model in self.models for p in model.parameters()] |
|
self.optimizer = optim.Adam(self.parameters, lr=0.001) |
|
|
|
|
|
|
|
|
|
def global_loss(self, outputs): |
|
return sum((torch.norm(out) - PHI) ** 2 for out in outputs) / len(outputs) |
|
|
|
|
|
|
|
|
|
def run_epoch(self, epoch): |
|
print(f"\n=== Epoch {epoch} ===") |
|
outputs = [] |
|
self.optimizer.zero_grad() |
|
for model in self.models: |
|
output = model.run() |
|
outputs.append(output) |
|
self.system_memory.append({model.name: output.detach().cpu().numpy()}) |
|
loss = self.global_loss(outputs) |
|
print(f"Global loss: {loss.item():.6f}") |
|
loss.backward() |
|
self.optimizer.step() |
|
self.global_introspection.append(f"Epoch {epoch}: Loss = {loss.item():.6f}") |
|
|
|
|
|
|
|
|
|
def run(self, epochs=3): |
|
for epoch in range(1, epochs + 1): |
|
self.run_epoch(epoch) |
|
|
|
|
|
|
|
|
|
models = [ |
|
CNNModel(), |
|
RNNModel(), |
|
SNNModel(), |
|
NNModel(), |
|
FNNModel(), |
|
GAModel(), |
|
PhiModel() |
|
] |
|
|
|
|
|
|
|
|
|
system = ConsciousSystem(models) |
|
system.run(epochs=3) |
|
|
|
|
|
|
|
|
|
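# Minimal stub components wired together by ConsciousAGI below; each stands in
# for a subsystem with a one-line placeholder implementation.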
class MultimodalSensorArray: |
|
def process(self, input_data): |
|
return torch.tensor(input_data, dtype=torch.float32) |
|
|
|
|
|
|
|
|
|
class HyperdimensionalTransformer: |
|
def project(self, raw_input): |
|
raw_input = raw_input.float() |
|
return torch.nn.functional.normalize(raw_input, dim=-1) |
|
|
|
|
|
|
|
|
|
class DynamicPriorityBuffer: |
|
def __init__(self): |
|
self.buffer = [] |
|
def update(self, data): |
|
self.buffer.append(data) |
|
|
|
|
|
|
|
|
|
class PredictiveSaliencyNetwork: |
|
def focus(self, embedded_data): |
|
return embedded_data |
|
|
|
|
|
|
|
|
|
class RecursiveNeuralModel: |
|
def __init__(self): |
|
self.state = torch.zeros(1) |
|
def update(self, workspace): |
|
self.state += 0.1 |
|
def read_state(self): |
|
return self.state |
|
|
|
|
|
|
|
|
|
class TheoryOfMindEngine: |
|
def infer(self, data): |
|
return torch.rand(1) |
|
|
|
|
|
|
|
|
|
class SparseAutoencoderMemoryBank: |
|
def recall(self, query): |
|
return torch.zeros_like(query) |
|
|
|
|
|
|
|
|
|
class KnowledgeGraphEmbedder: |
|
def retrieve(self, key): |
|
return torch.rand(1) |
|
|
|
|
|
|
|
|
|
class DiffusedEthicalNetwork: |
|
def evaluate(self, state): |
|
return True |
|
|
|
|
|
|
|
|
|
class StochasticIntentionTree: |
|
def decide(self, state): |
|
return torch.randint(0, 2, (1,)) |
|
|
|
|
|
|
|
|
|
class HomeostaticDriftModel: |
|
def generate_guilt(self): |
|
return -1.0 |
|
|
|
|
|
|
|
|
|
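# Perceive-act pipeline over the stubs: encode input, focus, update the self
# model, run an ethics check, then either emit a goal or a "guilt" signal.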
class ConsciousAGI: |
|
def __init__(self): |
|
self.sensors = MultimodalSensorArray() |
|
self.embedding_space = HyperdimensionalTransformer() |
|
self.global_workspace = DynamicPriorityBuffer() |
|
self.attention_mechanism = PredictiveSaliencyNetwork() |
|
self.self_model = RecursiveNeuralModel() |
|
self.meta_cognition = TheoryOfMindEngine() |
|
self.episodic_memory = SparseAutoencoderMemoryBank() |
|
self.semantic_memory = KnowledgeGraphEmbedder() |
|
self.value_system = DiffusedEthicalNetwork() |
|
self.goal_generator = StochasticIntentionTree() |
|
self.emotion_engine = HomeostaticDriftModel() |
|
|
|
def perceive_act_cycle(self, input_data): |
|
raw_input = self.sensors.process(input_data) |
|
embedded = self.embedding_space.project(raw_input) |
|
salient_data = self.attention_mechanism.focus(embedded) |
|
self.global_workspace.update(salient_data) |
|
self.self_model.update(self.global_workspace) |
|
current_state = self.self_model.read_state() |
|
ethical_check = self.value_system.evaluate(current_state) |
|
if ethical_check: |
|
return self.goal_generator.decide(current_state) |
|
else: |
|
return self.emotion_engine.generate_guilt() |
|
|
|
|
|
|
|
|
|
agi = ConsciousAGI() |
|
print(agi.perceive_act_cycle([1, 0, 1])) |
|
|
|
|
|
|
|
|
|
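# Bundle of larger torch networks plus a simple GA population. Note the CNN
# head assumes 32x32 inputs (8x8 after the two rounds of pooling).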
class ConsciousSupermassiveNN: |
|
def __init__(self): |
|
self.snn = self.create_snn() |
|
self.rnn = self.create_rnn() |
|
self.cnn = self.create_cnn() |
|
self.fnn = self.create_fnn() |
|
self.ga_population = self.initialize_ga_population() |
|
self.memory = {} |
|
|
|
|
|
|
|
|
|
def create_snn(self): |
|
return nn.Sequential( |
|
nn.Linear(4096, 2048), |
|
nn.ReLU(), |
|
nn.Linear(2048, 1024), |
|
nn.Sigmoid() |
|
) |
|
|
|
|
|
|
|
|
|
def create_rnn(self): |
|
return nn.RNN( |
|
input_size=4096, |
|
hidden_size=2048, |
|
num_layers=5, |
|
nonlinearity="tanh", |
|
batch_first=True |
|
) |
|
|
|
|
|
|
|
|
|
def create_cnn(self): |
|
return nn.Sequential( |
|
nn.Conv2d(1, 64, kernel_size=5, stride=1, padding=2), |
|
nn.ReLU(), |
|
nn.MaxPool2d(2), |
|
nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2), |
|
nn.ReLU(), |
|
nn.MaxPool2d(2), |
|
nn.Conv2d(128, 256, kernel_size=5, stride=1, padding=2), |
|
nn.ReLU(), |
|
nn.Flatten(), |
|
nn.Linear(256 * 8 * 8, 1024), |
|
nn.ReLU(), |
|
nn.Linear(1024, 512) |
|
) |
|
|
|
|
|
|
|
|
|
def create_fnn(self): |
|
return nn.Sequential( |
|
nn.Linear(4096, 2048), |
|
nn.ReLU(), |
|
nn.Linear(2048, 1024), |
|
nn.ReLU(), |
|
nn.Linear(1024, 512) |
|
) |
|
|
|
|
|
|
|
|
|
def initialize_ga_population(self): |
|
return [np.random.randn(4096) for _ in range(500)] |
|
|
|
|
|
|
|
|
|
def run_snn(self, x): |
|
input_tensor = torch.tensor(x, dtype=torch.float32) |
|
output = self.snn(input_tensor) |
|
print("SNN Output:", output) |
|
return output |
|
|
|
|
|
|
|
|
|
    def run_rnn(self, x):
        # Convert to a tensor first so .size() is available even when x is a
        # NumPy array or a plain list.
        input_tensor = torch.tensor(x, dtype=torch.float32)
        h0 = torch.zeros(5, input_tensor.size(0), 2048)
        output, hn = self.rnn(input_tensor, h0)
        print("RNN Output:", output)
        return output
|
|
|
|
|
|
|
|
|
def run_cnn(self, x): |
|
input_tensor = torch.tensor(x, dtype=torch.float32).unsqueeze(0).unsqueeze(0) |
|
output = self.cnn(input_tensor) |
|
print("CNN Output:", output) |
|
return output |
|
|
|
|
|
|
|
|
|
def run_fnn(self, x): |
|
input_tensor = torch.tensor(x, dtype=torch.float32) |
|
output = self.fnn(input_tensor) |
|
print("FNN Output:", output) |
|
return output |
|
|
|
|
|
|
|
|
|
    def run_ga(self, fitness_func):
        for generation in range(200):
            fitness_scores = [fitness_func(ind) for ind in self.ga_population]
            # Sort by score only: letting tuple comparison fall through to the
            # NumPy individuals would raise an error on tied scores.
            ranked = sorted(zip(fitness_scores, self.ga_population),
                            key=lambda pair: pair[0], reverse=True)
            sorted_population = [ind for _, ind in ranked]
            # Keep the top half and refill with mutated copies of it.
            self.ga_population = sorted_population[:250] + [
                sorted_population[i] + 0.1 * np.random.randn(4096) for i in range(250)
            ]
            best_fitness = max(fitness_scores)
            print(f"Generation {generation}, Best Fitness: {best_fitness}")
        return max(self.ga_population, key=fitness_func)
|
|
|
|
|
|
|
|
|
def consciousness_loop(self, input_data, mode="snn"): |
|
feedback = self.memory.get(mode, None) |
|
if feedback is not None: |
|
input_data = np.concatenate((input_data, feedback), axis=-1) |
|
if mode == "snn": |
|
output = self.run_snn(input_data) |
|
elif mode == "rnn": |
|
output = self.run_rnn(input_data) |
|
elif mode == "cnn": |
|
output = self.run_cnn(input_data) |
|
elif mode == "fnn": |
|
output = self.run_fnn(input_data) |
|
else: |
|
raise ValueError("Invalid mode") |
|
self.memory[mode] = output.detach().numpy() |
|
return output |
|
|
|
|
|
|
|
|
|
supermassive_nn = ConsciousSupermassiveNN() |
|
|
|
|
|
|
|
|
|
|
|
PHI = (1 + math.sqrt(5)) / 2 |
|
|
|
|
|
|
|
|
|
# The training corpus is supplied via the TRAINING_DATA environment variable
# (typically a Space secret). A small placeholder corpus (arbitrary text) is
# used as a fallback so the script can still start when the variable is unset,
# instead of crashing on None.split().
text = os.getenv("TRAINING_DATA") or "the golden ratio appears throughout this neural text generation demo"

print(text)

words = text.split()
|
|
|
|
|
|
|
|
|
# Second-order Markov chain: map each adjacent word pair to the list of words
# observed immediately after it.
trigram_chain = collections.defaultdict(list)
for i in range(len(words) - 2):
    trigram_chain[(words[i], words[i + 1])].append(words[i + 2])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def generate_text(length):
    # A non-empty chain is required to seed generation (i.e. at least three
    # words of training text).
    if not trigram_chain:
        return ""
    key = random.choice(list(trigram_chain.keys()))
    result = [key[0], key[1]]
    for _ in range(length - 2):
        if key in trigram_chain:
            next_word = random.choice(trigram_chain[key])
            result.append(next_word)
            key = (key[1], next_word)
        else:
            break
    return " ".join(result)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
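# Pure-Python feedforward network: two sigmoid hidden layers trained with
# standard backpropagation (no external dependencies).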
class NeuralNetwork: |
|
def __init__(self, input_size, hidden_size1, hidden_size2, output_size): |
|
self.input_size = input_size |
|
self.hidden_size1 = hidden_size1 |
|
self.hidden_size2 = hidden_size2 |
|
self.output_size = output_size |
|
self.weights_input_hidden1 = [ |
|
[random.random() for _ in range(input_size)] for _ in range(hidden_size1) |
|
] |
|
self.weights_hidden1_hidden2 = [ |
|
[random.random() for _ in range(hidden_size1)] for _ in range(hidden_size2) |
|
] |
|
self.weights_hidden2_output = [ |
|
[random.random() for _ in range(hidden_size2)] for _ in range(output_size) |
|
] |
|
self.bias_hidden1 = [random.random() for _ in range(hidden_size1)] |
|
self.bias_hidden2 = [random.random() for _ in range(hidden_size2)] |
|
self.bias_output = [random.random() for _ in range(output_size)] |
|
|
|
|
|
|
|
|
|
    def sigmoid(self, x):
        # Clamp to avoid math.exp overflow once training pushes pre-activations
        # strongly negative.
        x = max(-500.0, min(500.0, x))
        return 1 / (1 + math.exp(-x))
|
|
|
|
|
|
|
|
|
def sigmoid_derivative(self, x): |
|
return x * (1 - x) |
|
|
|
|
|
|
|
|
|
def forward(self, inputs): |
|
self.hidden_input1 = [ |
|
sum(inputs[i] * self.weights_input_hidden1[j][i] for i in range(self.input_size)) + self.bias_hidden1[j] |
|
for j in range(self.hidden_size1) |
|
] |
|
self.hidden_output1 = [self.sigmoid(x) for x in self.hidden_input1] |
|
self.hidden_input2 = [ |
|
sum(self.hidden_output1[i] * self.weights_hidden1_hidden2[j][i] for i in range(self.hidden_size1)) + self.bias_hidden2[j] |
|
for j in range(self.hidden_size2) |
|
] |
|
self.hidden_output2 = [self.sigmoid(x) for x in self.hidden_input2] |
|
self.output_input = [ |
|
sum(self.hidden_output2[i] * self.weights_hidden2_output[j][i] for i in range(self.hidden_size2)) + self.bias_output[j] |
|
for j in range(self.output_size) |
|
] |
|
self.output_output = [self.sigmoid(x) for x in self.output_input] |
|
return self.output_output |
|
|
|
|
|
|
|
|
|
def backward(self, inputs, target, learning_rate=0.1): |
|
output_errors = [target[i] - self.output_output[i] for i in range(self.output_size)] |
|
output_deltas = [output_errors[i] * self.sigmoid_derivative(self.output_output[i]) |
|
for i in range(self.output_size)] |
|
hidden2_errors = [ |
|
sum(output_deltas[k] * self.weights_hidden2_output[k][j] for k in range(self.output_size)) |
|
for j in range(self.hidden_size2) |
|
] |
|
hidden2_deltas = [hidden2_errors[j] * self.sigmoid_derivative(self.hidden_output2[j]) |
|
for j in range(self.hidden_size2)] |
|
hidden1_errors = [ |
|
sum(hidden2_deltas[k] * self.weights_hidden1_hidden2[k][j] for k in range(self.hidden_size2)) |
|
for j in range(self.hidden_size1) |
|
] |
|
hidden1_deltas = [hidden1_errors[j] * self.sigmoid_derivative(self.hidden_output1[j]) |
|
for j in range(self.hidden_size1)] |
|
|
|
|
|
|
|
|
|
for i in range(self.output_size): |
|
for j in range(self.hidden_size2): |
|
self.weights_hidden2_output[i][j] += learning_rate * output_deltas[i] * self.hidden_output2[j] |
|
self.bias_output[i] += learning_rate * output_deltas[i] |
|
|
|
|
|
|
|
|
|
for i in range(self.hidden_size2): |
|
for j in range(self.hidden_size1): |
|
self.weights_hidden1_hidden2[i][j] += learning_rate * hidden2_deltas[i] * self.hidden_output1[j] |
|
self.bias_hidden2[i] += learning_rate * hidden2_deltas[i] |
|
|
|
|
|
|
|
|
|
for i in range(self.hidden_size1): |
|
for j in range(self.input_size): |
|
self.weights_input_hidden1[i][j] += learning_rate * hidden1_deltas[i] * inputs[j] |
|
self.bias_hidden1[i] += learning_rate * hidden1_deltas[i] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
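# Pure-Python recurrent network. forward() makes two passes over the input,
# feeding each element as its own timestep into a shared hidden state.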
class RecurrentNeuralNetwork: |
|
def __init__(self, input_size, hidden_size, output_size): |
|
self.input_size = input_size |
|
self.hidden_size = hidden_size |
|
self.output_size = output_size |
|
self.weights_input_hidden = [ |
|
[random.random() for _ in range(input_size)] for _ in range(hidden_size) |
|
] |
|
self.weights_hidden_hidden = [ |
|
[random.random() for _ in range(hidden_size)] for _ in range(hidden_size) |
|
] |
|
self.weights_hidden_output = [ |
|
[random.random() for _ in range(hidden_size)] for _ in range(output_size) |
|
] |
|
self.bias_hidden = [random.random() for _ in range(hidden_size)] |
|
self.bias_output = [random.random() for _ in range(output_size)] |
|
|
|
|
|
|
|
|
|
def sigmoid(self, x): |
|
return 1 / (1 + math.exp(-x)) |
|
|
|
|
|
|
|
|
|
def sigmoid_derivative(self, x): |
|
return x * (1 - x) |
|
|
|
|
|
|
|
|
|
def forward(self, inputs): |
|
self.hidden_state = [0] * self.hidden_size |
|
for _ in range(2): |
|
for i in range(len(inputs)): |
|
current_input = [0] * self.input_size |
|
current_input[i] = inputs[i] |
|
combined = [ |
|
sum(current_input[k] * self.weights_input_hidden[j][k] for k in range(self.input_size)) + |
|
sum(self.hidden_state[k] * self.weights_hidden_hidden[j][k] for k in range(self.hidden_size)) + |
|
self.bias_hidden[j] |
|
for j in range(self.hidden_size) |
|
] |
|
self.hidden_state = [self.sigmoid(val) for val in combined] |
|
output = [ |
|
sum(self.hidden_state[k] * self.weights_hidden_output[i][k] for k in range(self.hidden_size)) + |
|
self.bias_output[i] |
|
for i in range(self.output_size) |
|
] |
|
return [self.sigmoid(o) for o in output] |
|
|
|
|
|
|
|
|
|
def backward(self, inputs, target, learning_rate=0.1): |
|
output = self.forward(inputs) |
|
output_errors = [target[i] - output[i] for i in range(self.output_size)] |
|
output_deltas = [output_errors[i] * self.sigmoid_derivative(output[i]) |
|
for i in range(self.output_size)] |
|
hidden_errors = [ |
|
sum(output_deltas[k] * self.weights_hidden_output[k][j] for k in range(self.output_size)) |
|
for j in range(self.hidden_size) |
|
] |
|
hidden_deltas = [hidden_errors[j] * self.sigmoid_derivative(self.hidden_state[j]) |
|
for j in range(self.hidden_size)] |
|
|
|
|
|
|
|
|
|
for i in range(self.output_size): |
|
for j in range(self.hidden_size): |
|
self.weights_hidden_output[i][j] += learning_rate * output_deltas[i] * self.hidden_state[j] |
|
self.bias_output[i] += learning_rate * output_deltas[i] |
|
|
|
|
|
|
|
|
|
for j in range(self.hidden_size): |
|
for k in range(self.input_size): |
|
self.weights_input_hidden[j][k] += learning_rate * hidden_deltas[j] * (inputs[k] if k < len(inputs) else 0) |
|
self.bias_hidden[j] += learning_rate * hidden_deltas[j] |
|
return output_errors |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
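# Pure-Python 1-D CNN: two ReLU convolutions feeding a fully connected layer.
# Only the output layer is updated in backward(); the kernels stay fixed.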
class ConvolutionalNeuralNetwork: |
|
def __init__(self, input_length, kernel_size1, kernel_size2, output_size): |
|
self.input_length = input_length |
|
self.kernel_size1 = kernel_size1 |
|
self.kernel_size2 = kernel_size2 |
|
self.output_size = output_size |
|
self.kernel1 = [random.random() for _ in range(kernel_size1)] |
|
self.bias1 = random.random() |
|
self.kernel2 = [random.random() for _ in range(kernel_size2)] |
|
self.bias2 = random.random() |
|
self.weights_output = [ |
|
[random.random() for _ in range(input_length - kernel_size1 - kernel_size2 + 2)] |
|
for _ in range(output_size) |
|
] |
|
self.bias_output = [random.random() for _ in range(output_size)] |
|
|
|
|
|
|
|
|
|
def relu(self, x): |
|
return x if x > 0 else 0 |
|
|
|
|
|
|
|
|
|
def relu_derivative(self, x): |
|
return 1 if x > 0 else 0 |
|
|
|
|
|
|
|
|
|
def convolve(self, inputs, kernel, bias): |
|
conv_output = [] |
|
kernel_size = len(kernel) |
|
for i in range(len(inputs) - kernel_size + 1): |
|
s = sum(inputs[i + j] * kernel[j] for j in range(kernel_size)) + bias |
|
conv_output.append(self.relu(s)) |
|
return conv_output |
|
|
|
|
|
|
|
|
|
def forward(self, inputs): |
|
conv1 = self.convolve(inputs, self.kernel1, self.bias1) |
|
conv2 = self.convolve(conv1, self.kernel2, self.bias2) |
|
fc_input = conv2 |
|
output = [ |
|
sum(fc_input[j] * self.weights_output[i][j] for j in range(len(fc_input))) + self.bias_output[i] |
|
for i in range(self.output_size) |
|
] |
|
return [self.relu(o) for o in output] |
|
|
|
|
|
|
|
|
|
def backward(self, inputs, target, learning_rate=0.1): |
|
output = self.forward(inputs) |
|
output_errors = [target[i] - output[i] for i in range(self.output_size)] |
|
for i in range(self.output_size): |
|
for j in range(len(inputs) - self.kernel_size1 - self.kernel_size2 + 2): |
|
self.weights_output[i][j] += learning_rate * output_errors[i] |
|
self.bias_output[i] += learning_rate * output_errors[i] |
|
return output_errors |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
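# Simple genetic algorithm whose fitness rewards genes close to φ.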
class GeneticAlgorithm: |
|
def __init__(self, population_size, gene_length): |
|
self.population_size = population_size |
|
self.gene_length = gene_length |
|
self.population = [ |
|
[random.random() for _ in range(gene_length)] for _ in range(population_size) |
|
] |
|
|
|
|
|
|
|
|
|
def fitness(self, individual): |
|
return -sum((gene - PHI) ** 2 for gene in individual) |
|
|
|
|
|
|
|
|
|
def selection(self): |
|
selected = sorted(self.population, key=self.fitness, reverse=True) |
|
return selected[: self.population_size // 2] |
|
|
|
|
|
|
|
|
|
def crossover(self, parent1, parent2): |
|
point = random.randint(1, self.gene_length - 1) |
|
child = parent1[:point] + parent2[point:] |
|
return child |
|
|
|
|
|
|
|
|
|
def mutate(self, individual, mutation_rate=0.01): |
|
for i in range(self.gene_length): |
|
if random.random() < mutation_rate: |
|
individual[i] = random.random() |
|
return individual |
|
|
|
|
|
|
|
|
|
def evolve(self, generations): |
|
for _ in range(generations): |
|
selected = self.selection() |
|
new_population = selected[:] |
|
while len(new_population) < self.population_size: |
|
parent1 = random.choice(selected) |
|
parent2 = random.choice(selected) |
|
child = self.crossover(parent1, parent2) |
|
child = self.mutate(child) |
|
new_population.append(child) |
|
self.population = new_population |
|
best = max(self.population, key=self.fitness) |
|
return best, self.fitness(best) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
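# Pure-Python LSTM cell evaluated for a single step: the four gates are
# computed once over the whole input vector rather than per timestep.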
class LSTM: |
|
def __init__(self, input_size, hidden_size, output_size): |
|
self.input_size = input_size |
|
self.hidden_size = hidden_size |
|
self.output_size = output_size |
|
self.W_i = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)] |
|
self.U_i = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)] |
|
self.b_i = [random.random() for _ in range(hidden_size)] |
|
self.W_f = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)] |
|
self.U_f = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)] |
|
self.b_f = [random.random() for _ in range(hidden_size)] |
|
self.W_o = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)] |
|
self.U_o = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)] |
|
self.b_o = [random.random() for _ in range(hidden_size)] |
|
self.W_c = [[random.random() for _ in range(input_size)] for _ in range(hidden_size)] |
|
self.U_c = [[random.random() for _ in range(hidden_size)] for _ in range(hidden_size)] |
|
self.b_c = [random.random() for _ in range(hidden_size)] |
|
self.W_y = [[random.random() for _ in range(hidden_size)] for _ in range(output_size)] |
|
self.b_y = [random.random() for _ in range(output_size)] |
|
|
|
|
|
|
|
|
|
def sigmoid(self, x): |
|
return 1 / (1 + math.exp(-x)) |
|
|
|
|
|
|
|
|
|
def forward(self, inputs): |
|
h = [0] * self.hidden_size |
|
c = [0] * self.hidden_size |
|
|
|
|
|
|
|
|
|
i_gate = [] |
|
for j in range(self.hidden_size): |
|
s = sum(inputs[k] * self.W_i[j][k] for k in range(self.input_size)) + \ |
|
sum(h[k] * self.U_i[j][k] for k in range(self.hidden_size)) + self.b_i[j] |
|
i_gate.append(self.sigmoid(s)) |
|
|
|
|
|
|
|
|
|
f_gate = [] |
|
for j in range(self.hidden_size): |
|
s = sum(inputs[k] * self.W_f[j][k] for k in range(self.input_size)) + \ |
|
sum(h[k] * self.U_f[j][k] for k in range(self.hidden_size)) + self.b_f[j] |
|
f_gate.append(self.sigmoid(s)) |
|
|
|
|
|
|
|
|
|
o_gate = [] |
|
for j in range(self.hidden_size): |
|
s = sum(inputs[k] * self.W_o[j][k] for k in range(self.input_size)) + \ |
|
sum(h[k] * self.U_o[j][k] for k in range(self.hidden_size)) + self.b_o[j] |
|
o_gate.append(self.sigmoid(s)) |
|
|
|
|
|
|
|
|
|
g_gate = [] |
|
for j in range(self.hidden_size): |
|
s = sum(inputs[k] * self.W_c[j][k] for k in range(self.input_size)) + \ |
|
sum(h[k] * self.U_c[j][k] for k in range(self.hidden_size)) + self.b_c[j] |
|
g_gate.append(math.tanh(s)) |
|
|
|
|
|
|
|
|
|
c = [f_gate[j] * c[j] + i_gate[j] * g_gate[j] for j in range(self.hidden_size)] |
|
h = [o_gate[j] * math.tanh(c[j]) for j in range(self.hidden_size)] |
|
|
|
|
|
|
|
|
|
y = [] |
|
for i in range(self.output_size): |
|
s = sum(h[j] * self.W_y[i][j] for j in range(self.hidden_size)) + self.b_y[i] |
|
y.append(self.sigmoid(s)) |
|
return y |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
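# Single-head scaled dot-product self-attention block with mean-pooling and a
# projection to per-token scores.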
class Transformer: |
|
    def __init__(self, d_model, num_tokens):
        self.d_model = d_model
        self.num_tokens = num_tokens
        self.W_q = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_k = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_v = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        self.W_o = [[random.random() for _ in range(d_model)] for _ in range(d_model)]
        # Output projection is initialised once here instead of being
        # re-sampled on every forward pass, so repeated calls are consistent.
        self.proj_weights = [[random.random() for _ in range(d_model)] for _ in range(num_tokens)]
        self.proj_bias = [random.random() for _ in range(num_tokens)]
|
|
|
|
|
|
|
|
|
def dot_product(self, a, b): |
|
return sum(x * y for x, y in zip(a, b)) |
|
|
|
|
|
|
|
|
|
def matmul_vector(self, matrix, vector): |
|
return [sum(matrix[i][j] * vector[j] for j in range(len(vector))) for i in range(len(matrix))] |
|
|
|
|
|
|
|
|
|
def softmax(self, x): |
|
m = max(x) |
|
exps = [math.exp(i - m) for i in x] |
|
s = sum(exps) |
|
return [j / s for j in exps] |
|
|
|
|
|
|
|
    def forward(self, inputs):
        queries = [self.matmul_vector(self.W_q, token) for token in inputs]
        keys = [self.matmul_vector(self.W_k, token) for token in inputs]
        values = [self.matmul_vector(self.W_v, token) for token in inputs]
        outputs = []
        for i in range(len(inputs)):
            # Scaled dot-product attention weights for token i.
            scores = [self.dot_product(queries[i], keys[j]) / math.sqrt(self.d_model)
                      for j in range(len(inputs))]
            attn = self.softmax(scores)
            attn_output = [0] * self.d_model
            for j in range(len(inputs)):
                for k in range(self.d_model):
                    attn_output[k] += attn[j] * values[j][k]
            outputs.append(self.matmul_vector(self.W_o, attn_output))
        # Mean-pool the attended tokens, then project to per-token scores.
        avg_output = [sum(x[k] for x in outputs) / len(outputs) for k in range(self.d_model)]
        token_scores = [
            sum(avg_output[k] * self.proj_weights[i][k] for k in range(self.d_model)) + self.proj_bias[i]
            for i in range(self.num_tokens)
        ]
        return [1 / (1 + math.exp(-score)) for score in token_scores]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
unique_words = list(set(words)) |
|
word_to_index = {word: i for i, word in enumerate(unique_words)} |
|
index_to_word = {i: word for word, i in word_to_index.items()} |
|
|
|
|
|
|
|
|
|
input_data = [[0] * len(unique_words) for _ in range(len(words) - 2)] |
|
for i in range(len(words) - 2): |
|
input_data[i][word_to_index[words[i]]] = 1 |
|
|
|
|
|
|
|
|
|
output_data = [[0] * len(unique_words) for _ in range(len(words) - 2)] |
|
for i in range(len(words) - 2): |
|
output_data[i][word_to_index[words[i + 1]]] = 1 |
|
|
|
|
|
|
|
|
|
input_size = len(unique_words) |
|
hidden_size1 = round(PHI * input_size) |
|
hidden_size2 = round(PHI * hidden_size1) |
|
output_size = len(unique_words) |
|
|
|
|
|
|
|
|
|
# Named ff_nn to avoid shadowing torch.nn, which was imported above as nn.
ff_nn = NeuralNetwork(input_size, hidden_size1, hidden_size2, output_size)
epochs = round(100 * PHI)
for epoch in range(epochs):
    for i in range(len(input_data)):
        ff_nn.forward(input_data[i])
        ff_nn.backward(input_data[i], output_data[i], learning_rate=0.1)
    if (epoch + 1) % round(PHI) == 0:
        print("Feedforward NN Epoch {}/{}".format(epoch + 1, epochs))
|
|
|
|
|
|
|
|
|
rnn = RecurrentNeuralNetwork(input_size, hidden_size1, output_size) |
|
rnn_output = rnn.forward(input_data[0]) |
|
print("Recurrent NN Output:", rnn_output) |
|
|
|
|
|
|
|
|
|
kernel_size1 = round(3 * PHI) |
|
kernel_size2 = round(2 * PHI) |
|
cnn = ConvolutionalNeuralNetwork(input_length=round(10 * PHI), kernel_size1=kernel_size1, |
|
kernel_size2=kernel_size2, output_size=output_size) |
|
sample_input = [random.random() for _ in range(round(10 * PHI))] |
|
cnn_output = cnn.forward(sample_input) |
|
print("Convolutional NN Output:", cnn_output) |
|
|
|
|
|
|
|
|
|
population_size = round(10 * PHI) |
|
ga = GeneticAlgorithm(population_size, round(PHI * 5)) |
|
best_individual, best_fitness = ga.evolve(round(50 * PHI)) |
|
print("Genetic Algorithm Best Individual:", best_individual, "Fitness:", best_fitness) |
|
|
|
|
|
|
|
|
|
lstm_hidden_size = round(PHI * input_size) |
|
lstm = LSTM(input_size, lstm_hidden_size, output_size) |
|
lstm_output = lstm.forward(input_data[0]) |
|
print("LSTM Output:", lstm_output) |
|
|
|
|
|
|
|
|
|
transformer_d_model = round(PHI * input_size) |
|
transformer = Transformer(transformer_d_model, output_size) |
|
transformer_input = [] |
|
for i in range(len(unique_words)): |
|
vec = [0] * transformer_d_model |
|
if i < transformer_d_model: |
|
vec[i] = 1 |
|
transformer_input.append(vec) |
|
transformer_output = transformer.forward(transformer_input) |
|
print("Transformer Output:", transformer_output) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
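# Ensemble step: average the feedforward, recurrent, LSTM and transformer
# scores to pick a seed word, then extend it with φ-scaled Markov segments.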
def advanced_text_generation(input_vector):
    # Average the four models' scores into one next-word distribution.
    ff_output = ff_nn.forward(input_vector)
    rnn_out = rnn.forward(input_vector)
    lstm_out = lstm.forward(input_vector)
    transformer_out = transformer.forward([input_vector])
    combined = [
        (ff_output[i] + rnn_out[i] + lstm_out[i] + transformer_out[i]) / 4
        for i in range(len(ff_output))
    ]
    predicted_index = combined.index(max(combined))
    predicted_word = index_to_word[predicted_index]
    # Stitch together Markov segments whose length grows by a factor of φ,
    # then append the ensemble's predicted word.
    long_text = ""
    current_length = round(10 * PHI)
    for _ in range(5):
        segment = generate_text(current_length)
        long_text += segment + " "
        current_length = round(current_length * PHI)
    return long_text + predicted_word
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
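# Gradio callback: encode the message as a bag-of-words vector over the known
# vocabulary and reply with the ensemble's generated text.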
def chat(user_input, history=None):
    print("FiPhi-NeuralMark ACC Initialized")

    # Bag-of-words encoding of the message over the known vocabulary.
    input_vector = [0] * len(unique_words)
    for word in user_input.split():
        if word in word_to_index:
            input_vector[word_to_index[word]] = 1

    # Return only the assistant reply; with type="messages" the ChatInterface
    # manages the conversation history itself.
    return advanced_text_generation(input_vector)
|
|
|
|
|
|
|
|
|
|
|
|
|
demo = gr.ChatInterface( |
|
fn=chat, |
|
type="messages", |
|
editable=True, |
|
save_history=True, |
|
analytics_enabled=True, |
|
flagging_mode="manual", |
|
chatbot=gr.Chatbot( |
|
type="messages", |
|
label="🧠FiPhi-NeuralMark-V3🧠", |
|
show_copy_button=True, |
|
group_consecutive_messages=False, |
|
avatar_images=( |
|
"https://huggingface.co/spaces/TejAndrewsACC/Z3ta_Z/resolve/main/Screenshot_20250201-131420.png", |
|
"https://huggingface.co/spaces/TejAndrewsACC/FiPhi-NeuralMark-V3-Chat/resolve/main/Logo.jpeg" |
|
), |
|
placeholder="🧠Hi, I'm FiPhi-NeuralMark-V3🧠", |
|
show_copy_all_button=True |
|
), |
|
theme="TejAndrewsACC/FiPhi" |
|
) |
|
|
|
if __name__ == "__main__": |
|
demo.launch(share=True) |