"""
NeuralQuantum NQLM Model Implementation for Hugging Face Transformers
"""
import torch
import torch.nn as nn
from transformers import PreTrainedModel
from transformers.modeling_outputs import CausalLMOutputWithPast
from configuration_nqlm import NeuralQuantumNQLMConfig

class QuantumLayer(nn.Module):
    """Quantum-inspired layer for enhanced processing"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.quantum_circuit_depth = config.quantum_circuit_depth
        self.hidden_size = config.hidden_size
        # Quantum-inspired parameters: one learned transformation per circuit "layer"
        self.quantum_weights = nn.Parameter(torch.randn(self.quantum_circuit_depth, self.hidden_size, self.hidden_size))
        self.quantum_bias = nn.Parameter(torch.randn(self.hidden_size))

    def forward(self, hidden_states):
        # Simulate quantum circuit operations as a stack of learned transformations
        for i in range(self.quantum_circuit_depth):
            # Apply quantum-inspired transformation followed by a bounded non-linearity
            hidden_states = torch.matmul(hidden_states, self.quantum_weights[i])
            hidden_states = torch.tanh(hidden_states)
        return hidden_states + self.quantum_bias
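
# A minimal shape sketch for QuantumLayer (illustrative only; hidden_size=64 and
# quantum_circuit_depth=2 are assumed example values, not values taken from
# configuration_nqlm.py):
#
#     cfg = NeuralQuantumNQLMConfig(hidden_size=64, quantum_circuit_depth=2)
#     layer = QuantumLayer(cfg)
#     x = torch.randn(1, 10, 64)      # (batch, seq_len, hidden_size)
#     y = layer(x)                    # same shape out: (1, 10, 64)
#
# The layer is shape-preserving, so it can sit in front of the attention
# projections without changing any tensor dimensions.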

class NeuralQuantumAttention(nn.Module):
    """Quantum-enhanced attention mechanism"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.num_attention_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_dim = self.hidden_size // self.num_attention_heads
        self.query = nn.Linear(self.hidden_size, self.hidden_size)
        self.key = nn.Linear(self.hidden_size, self.hidden_size)
        self.value = nn.Linear(self.hidden_size, self.hidden_size)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        # Quantum enhancement layer
        self.quantum_layer = QuantumLayer(config)

    def forward(self, hidden_states, attention_mask=None):
        batch_size, seq_len, hidden_size = hidden_states.size()
        # Apply quantum enhancement before projecting to queries, keys, and values
        quantum_enhanced = self.quantum_layer(hidden_states)
        # Standard attention projections
        query = self.query(quantum_enhanced)
        key = self.key(quantum_enhanced)
        value = self.value(quantum_enhanced)
        # Reshape for multi-head attention: (batch, heads, seq_len, head_dim)
        query = query.view(batch_size, seq_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
        key = key.view(batch_size, seq_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
        value = value.view(batch_size, seq_len, self.num_attention_heads, self.head_dim).transpose(1, 2)
        # Scaled dot-product attention scores: (batch, heads, seq_len, seq_len)
        attention_scores = torch.matmul(query, key.transpose(-2, -1)) / (self.head_dim ** 0.5)
        # Causal mask so each position attends only to itself and earlier positions
        causal_mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=hidden_states.device))
        attention_scores = attention_scores.masked_fill(~causal_mask, -1e9)
        if attention_mask is not None:
            # Broadcast the (batch, seq_len) padding mask across heads and query positions
            attention_scores = attention_scores.masked_fill(attention_mask[:, None, None, :] == 0, -1e9)
        attention_probs = torch.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)
        # Apply attention to values and merge heads back to (batch, seq_len, hidden_size)
        context = torch.matmul(attention_probs, value)
        context = context.transpose(1, 2).contiguous().view(batch_size, seq_len, hidden_size)
        return context

class NeuralQuantumBlock(nn.Module):
    """NeuralQuantum transformer block"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention = NeuralQuantumAttention(config)
        self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = nn.Sequential(
            nn.Linear(config.hidden_size, config.intermediate_size),
            nn.GELU(),
            nn.Linear(config.intermediate_size, config.hidden_size),
            nn.Dropout(config.hidden_dropout_prob),
        )
        self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None):
        # Self-attention with residual connection (post-LayerNorm)
        attn_output = self.attention(hidden_states, attention_mask)
        hidden_states = self.ln_1(hidden_states + attn_output)
        # MLP with residual connection
        mlp_output = self.mlp(hidden_states)
        hidden_states = self.ln_2(hidden_states + mlp_output)
        return hidden_states

class NeuralQuantumNQLMForCausalLM(PreTrainedModel):
    """NeuralQuantum NQLM model for causal language modeling"""

    config_class = NeuralQuantumNQLMConfig

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Token and learned position embeddings
        self.wte = nn.Embedding(config.vocab_size, config.hidden_size)
        self.wpe = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.drop = nn.Dropout(config.hidden_dropout_prob)
        # Transformer blocks
        self.h = nn.ModuleList([
            NeuralQuantumBlock(config) for _ in range(config.num_hidden_layers)
        ])
        # Output layer
        self.ln_f = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        # Initialize weights
        self.init_weights()

    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        batch_size, seq_len = input_ids.size()
        # Position ids default to 0..seq_len-1 for every sequence in the batch
        if position_ids is None:
            position_ids = torch.arange(0, seq_len, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand(batch_size, -1)
        # Input embeddings: token + position
        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        hidden_states = self.drop(hidden_states)
        # Transformer blocks
        for block in self.h:
            hidden_states = block(hidden_states, attention_mask)
        # Final layer norm
        hidden_states = self.ln_f(hidden_states)
        # Language modeling head
        logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
        if not return_dict:
            output = (logits,)
            return ((loss,) + output) if loss is not None else output
        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=None,
            hidden_states=None,
            attentions=None,
        )

    def generate(self, input_ids, max_length=50, temperature=1.0, do_sample=True, **kwargs):
        """Generate text with greedy or temperature sampling (overrides the default HF generate)"""
        self.eval()
        with torch.no_grad():
            for _ in range(max_length - input_ids.size(1)):
                # Get logits for the last position and apply temperature
                outputs = self.forward(input_ids, return_dict=True)
                logits = outputs.logits[:, -1, :] / temperature
                if do_sample:
                    probs = torch.softmax(logits, dim=-1)
                    next_token = torch.multinomial(probs, 1)
                else:
                    next_token = torch.argmax(logits, dim=-1, keepdim=True)
                input_ids = torch.cat([input_ids, next_token], dim=1)
        return input_ids
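

# A minimal local smoke test, assuming the config class accepts standard GPT-style
# fields (vocab_size, hidden_size, num_hidden_layers, num_attention_heads,
# intermediate_size, max_position_embeddings, quantum_circuit_depth plus the dropout
# and layer-norm settings used above). The exact names and defaults live in
# configuration_nqlm.py, so treat the values below as illustrative, not the
# released configuration.
if __name__ == "__main__":
    config = NeuralQuantumNQLMConfig(
        vocab_size=1000,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=128,
        max_position_embeddings=128,
        quantum_circuit_depth=2,
    )
    model = NeuralQuantumNQLMForCausalLM(config)

    # Forward pass with labels to check that loss and logits have the expected shapes
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    outputs = model(input_ids=input_ids, labels=input_ids)
    print("loss:", float(outputs.loss), "logits shape:", tuple(outputs.logits.shape))

    # Greedy decoding with the custom generate() loop
    generated = model.generate(input_ids[:, :4], max_length=12, do_sample=False)
    print("generated shape:", tuple(generated.shape))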