defmodule LLamaCpp.ModelOptions do
  @moduledoc """
  Options used when loading a llama.cpp model.

  All fields have defaults, so `%LLamaCpp.ModelOptions{}` is always valid.
  Field names appear to mirror the corresponding llama.cpp model-loading
  parameters (e.g. `n_gpu_layers`, `m_map`/mmap, `m_lock`/mlock) — confirm
  against the NIF/port layer that consumes this struct.
  """

  @type t :: %__MODULE__{
          context_size: non_neg_integer(),
          seed: integer(),
          f16_memory: boolean(),
          m_lock: boolean(),
          embeddings: boolean(),
          low_vram: boolean(),
          vocab_only: boolean(),
          m_map: boolean(),
          n_batch: non_neg_integer(),
          numa: boolean(),
          n_gpu_layers: non_neg_integer(),
          main_gpu: String.t(),
          tensor_split: String.t()
        }

  defstruct context_size: 512,
            seed: 0,
            f16_memory: true,
            m_lock: false,
            embeddings: false,
            low_vram: false,
            vocab_only: false,
            m_map: true,
            n_batch: 0,
            numa: false,
            n_gpu_layers: 0,
            main_gpu: "",
            tensor_split: ""
end
defmodule LLamaCpp.PredictOptions do
  @moduledoc """
  Options used for a llama.cpp prediction (text-generation) call.

  All fields have defaults, so `%LLamaCpp.PredictOptions{}` is always valid.
  Field names appear to mirror llama.cpp sampling/generation parameters
  (`top_k`, `top_p`, `temperature`, `mirostat`, etc.) — confirm exact
  semantics against the NIF/port layer that consumes this struct.
  """

  @type t :: %__MODULE__{
          seed: integer(),
          threads: pos_integer(),
          tokens: non_neg_integer(),
          top_k: non_neg_integer(),
          repeat: non_neg_integer(),
          batch: non_neg_integer(),
          n_keep: non_neg_integer(),
          top_p: float(),
          temperature: float(),
          penalty: float(),
          f16_kv: boolean(),
          debug_mode: boolean(),
          stop_prompts: [String.t()],
          ignore_eos: boolean(),
          tail_free_sampling_z: float(),
          typical_p: float(),
          frequency_penalty: float(),
          presence_penalty: float(),
          mirostat: non_neg_integer(),
          mirostat_eta: float(),
          mirostat_tau: float(),
          penalize_nl: boolean(),
          logit_bias: String.t(),
          path_prompt_cache: String.t(),
          m_lock: boolean(),
          m_map: boolean(),
          prompt_cache_all: boolean(),
          prompt_cache_ro: boolean(),
          main_gpu: String.t(),
          tensor_split: String.t()
        }

  # seed: -1 conventionally means "pick a random seed" in llama.cpp —
  # NOTE(review): verify against the consuming native layer.
  defstruct seed: -1,
            threads: 8,
            tokens: 128,
            top_k: 40,
            repeat: 64,
            batch: 512,
            n_keep: 64,
            top_p: 0.95,
            temperature: 0.8,
            penalty: 1.1,
            f16_kv: false,
            debug_mode: false,
            stop_prompts: [],
            ignore_eos: false,
            tail_free_sampling_z: 1.0,
            typical_p: 1.0,
            frequency_penalty: 0.0,
            presence_penalty: 0.0,
            mirostat: 0,
            mirostat_eta: 0.1,
            mirostat_tau: 5.0,
            penalize_nl: false,
            logit_bias: "",
            path_prompt_cache: "",
            m_lock: false,
            m_map: false,
            prompt_cache_all: false,
            prompt_cache_ro: false,
            main_gpu: "",
            tensor_split: ""
end