llama_core/
error.rs

1//! Error types for the Llama Core library.
2
3use thiserror::Error;
4
5/// Error types for the Llama Core library.
6#[derive(Error, Debug)]
7pub enum LlamaCoreError {
8    /// Errors in General operation.
9    #[error("{0}")]
10    Operation(String),
11    /// Errors in Context initialization.
12    #[error("Failed to initialize computation context. Reason: {0}")]
13    InitContext(String),
14    /// Errors thrown by the wasi-nn-ggml plugin and runtime.
15    #[error("{0}")]
16    Backend(#[from] BackendError),
17    /// Errors thrown by the Search Backend
18    #[cfg(feature = "search")]
19    #[cfg_attr(docsrs, doc(cfg(feature = "search")))]
20    #[error("{0}")]
21    Search(String),
22    /// Errors in file not found.
23    #[error("File not found.")]
24    FileNotFound,
25    /// Errors in Qdrant.
26    #[cfg(feature = "rag")]
27    #[cfg_attr(docsrs, doc(cfg(feature = "rag")))]
28    #[error("Qdrant error:{0}")]
29    Qdrant(String),
30}
31
/// Error type for failures reported by the wasi-nn backend.
///
/// Each variant corresponds to one stage of the inference pipeline and
/// carries the backend's reason string verbatim (`#[error("{0}")]`).
/// The `*Single` variants are the streaming-mode counterparts of their
/// batch-mode siblings.
#[derive(Error, Debug)]
pub enum BackendError {
    /// Errors in setting the input tensor.
    #[error("{0}")]
    SetInput(String),
    /// Errors in the model inference.
    #[error("{0}")]
    Compute(String),
    /// Errors in the model inference in the stream mode.
    #[error("{0}")]
    ComputeSingle(String),
    /// Errors in getting the output tensor.
    #[error("{0}")]
    GetOutput(String),
    /// Errors in getting the output tensor in the stream mode.
    #[error("{0}")]
    GetOutputSingle(String),
    /// Errors in cleaning up the computation context in the stream mode.
    #[error("{0}")]
    FinishSingle(String),
}