Instructions for using Deci/DeciLM-7B with libraries, inference providers, notebooks, and local apps. Follow the sections below to get started.
- Libraries
- Transformers
How to use Deci/DeciLM-7B with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="Deci/DeciLM-7B", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)

# Load model directly
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("Deci/DeciLM-7B", trust_remote_code=True, dtype="auto")
```
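Since DeciLM-7B is a base (non-chat) model, plain text completion is often the more natural interface. A minimal generation sketch, assuming a GPU plus the `accelerate` package for `device_map="auto"`; the dtype and sampling settings are illustrative, not prescribed by the model card:

```python
# Minimal text-completion sketch: tokenizer + model, then sampled decoding.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Deci/DeciLM-7B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption: bf16-capable GPU; use float16 or "auto" otherwise
    device_map="auto",           # assumption: accelerate is installed
    trust_remote_code=True,
)

prompt = "The capital of France is"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.95)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```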
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use Deci/DeciLM-7B with vLLM:
Install vLLM from pip and serve the model:
```bash
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "Deci/DeciLM-7B"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "Deci/DeciLM-7B",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
```
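Because the server exposes an OpenAI-compatible API, it can also be called from Python. A minimal sketch using the `openai` client; the `base_url` and placeholder API key assume the local, unauthenticated server started above:

```python
# Query the local vLLM server through its OpenAI-compatible endpoint.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

response = client.chat.completions.create(
    model="Deci/DeciLM-7B",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
)
print(response.choices[0].message.content)
```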
- SGLang
How to use Deci/DeciLM-7B with SGLang:
Install SGLang from pip and serve the model:
```bash
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
    --model-path "Deci/DeciLM-7B" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "Deci/DeciLM-7B",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
```
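The curl call above, translated to Python with `requests`; this assumes the server from the previous step is listening on port 30000:

```python
# Send the same chat request to SGLang's OpenAI-compatible endpoint from Python.
import requests

response = requests.post(
    "http://localhost:30000/v1/chat/completions",
    json={
        "model": "Deci/DeciLM-7B",
        "messages": [{"role": "user", "content": "What is the capital of France?"}],
    },
)
print(response.json()["choices"][0]["message"]["content"])
```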
Use Docker images

```bash
docker run --gpus all \
    --shm-size 32g \
    -p 30000:30000 \
    -v ~/.cache/huggingface:/root/.cache/huggingface \
    --env "HF_TOKEN=<secret>" \
    --ipc=host \
    lmsysorg/sglang:latest \
    python3 -m sglang.launch_server \
    --model-path "Deci/DeciLM-7B" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/chat/completions" \
    -H "Content-Type: application/json" \
    --data '{
        "model": "Deci/DeciLM-7B",
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ]
    }'
```
- Docker Model Runner
How to use Deci/DeciLM-7B with Docker Model Runner:
```bash
docker model run hf.co/Deci/DeciLM-7B
```
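Run without a prompt, `docker model run` opens an interactive chat session; a one-shot prompt can also be passed as a final argument. A sketch, assuming the Model Runner feature is enabled in Docker and the model can be pulled:

```bash
# One-shot prompt (assumption: Docker Model Runner is enabled and the model is available locally)
docker model run hf.co/Deci/DeciLM-7B "What is the capital of France?"
```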
```python
# coding=utf-8
# Copyright and license in the repo.
""" PyTorch DeciLM model."""
from .version_check import check_transformers_version

check_transformers_version()

from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging

from .configuration_decilm import DeciLMConfig
from .transformers_v4_35_2__modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from .transformers_v4_35_2__modeling_llama import LlamaMLP, LlamaRMSNorm, LlamaAttention, apply_rotary_pos_emb, \
    repeat_kv, LlamaPreTrainedModel, LLAMA_START_DOCSTRING, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, \
    BaseModelOutputWithPast, LLAMA_INPUTS_DOCSTRING
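# Register the "deci" architecture in transformers' causal-LM name mapping so that the
# Auto classes can resolve DeciLMForCausalLM.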
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES["deci"] = "DeciLMForCausalLM"

_CONFIG_FOR_DOC = "DeciLMConfig"

logger = logging.get_logger(__name__)


class DeciLMAttention(LlamaAttention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""
    def __init__(self, config: DeciLMConfig, layer_idx: int):
        nn.Module.__init__(self)
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.layer_idx = layer_idx
        self.num_key_value_heads = config.num_key_value_heads_per_layer[layer_idx]
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.pretraining_tp = config.pretraining_tp
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = getattr(config, 'rope_theta', None)

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
        self._init_rope()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()
        is_decode = past_key_value is not None

        if self.pretraining_tp > 1:
            key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.pretraining_tp
            query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.pretraining_tp, dim=0)
            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)

            query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.pretraining_tp)]
            query_states = torch.cat(query_states, dim=-1)

            key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.pretraining_tp)]
            key_states = torch.cat(key_states, dim=-1)

            value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.pretraining_tp)]
            value_states = torch.cat(value_states, dim=-1)
        else:
            query_states = self.q_proj(hidden_states)
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
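        # Decode steps (a KV cache is already present) and prefill below use different
        # scaled_dot_product_attention kernel configurations.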
        if is_decode:
            with torch.backends.cuda.sdp_kernel(enable_math=True, enable_flash=True,
                                                enable_mem_efficient=attention_mask is None):
                attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states,
                                                             is_causal=False,
                                                             attn_mask=attention_mask)
            attn_output = attn_output.contiguous().view(bsz, q_len, self.hidden_size)
        else:
            with torch.backends.cuda.sdp_kernel(enable_math=True, enable_flash=False, enable_mem_efficient=False):
                attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states,
                                                             is_causal=attention_mask is None,
                                                             attn_mask=attention_mask)

            if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
                raise ValueError(
                    f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                    f" {attn_output.size()}"
                )

            attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, self.hidden_size)

        if self.pretraining_tp > 1:
            attn_output = attn_output.split(self.hidden_size // self.pretraining_tp, dim=2)
            o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.pretraining_tp, dim=1)
            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.pretraining_tp)])
        else:
            attn_output = self.o_proj(attn_output)

        attn_weights = None
        return attn_output, attn_weights, past_key_value


class DeciLMDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: DeciLMConfig, layer_idx: int):
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size
        self.layer_idx = layer_idx
        self.self_attn = DeciLMAttention(config=config, layer_idx=layer_idx)
        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)


class DeciLMPreTrainedModel(LlamaPreTrainedModel):
    config_class = DeciLMConfig
    _no_split_modules = ["DeciLMDecoderLayer"]
    _keys_to_ignore_on_load_missing = ["self_attn.rotary_emb.inv_freq"]


class DeciLMModel(LlamaModel, DeciLMPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DeciLMDecoderLayer`]

    Args:
        config: DeciLMConfig
    """

    def __init__(self, config: DeciLMConfig):
        DeciLMPreTrainedModel.__init__(self, config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([DeciLMDecoderLayer(config, layer_idx) for layer_idx
                                     in range(config.num_hidden_layers)])
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
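        # If no position is actually masked out (no padding in the batch), drop the mask so the
        # attention layers can rely on the purely causal fast path.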
        attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        if attention_mask is not None:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_value,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class DeciLMForCausalLM(LlamaForCausalLM, DeciLMPreTrainedModel):
    def __init__(self, config):
        DeciLMPreTrainedModel.__init__(self, config)
        self.model = DeciLMModel(config)
        self.pretraining_tp = config.pretraining_tp
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()
```