logql

package
v1.9.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Dec 28, 2025 License: AGPL-3.0 Imports: 16 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type BinOp

type BinOp struct {
	Op  string `json:"op"`
	LHS LogAST `json:"lhs"`
	RHS LogAST `json:"rhs"`
}

type Kind

type Kind string
const (
	KindLogSelector Kind = "log_selector" // `{app="api"}` (maybe with pipeline)
	KindLogRange    Kind = "log_range"    // `{...} ... [5m] offset 1m`
	KindRangeAgg    Kind = "range_agg"    // `rate|count_over_time|... ( <log_range> )`
	KindVector      Kind = "vector"       // number literal
	KindVectorAgg   Kind = "vector_agg"   // `sum|avg|... by(...) ( <vector/sample> )`
	KindBinOp       Kind = "binop"        // `<sample> <op> <sample>`
	KindScalar      Kind = "scalar_literal"
	KindOpaque      Kind = "opaque"
)

type LAggNode

type LAggNode struct {
	// Vector aggregation (sum/avg/min/max/count … by/without)
	Op      string
	Param   *int // e.g. 3 for topk(3, ...)
	By      []string
	Without []string
	Child   LExecNode
}

type LBinOpNode

type LBinOpNode struct {
	Op       string
	LHS, RHS LExecNode
}

type LExecNode

type LExecNode interface{}

type LLeafNode

type LLeafNode struct {
	Leaf LogLeaf
}

type LQueryPlan

type LQueryPlan struct {
	Root    LExecNode
	Leaves  []LogLeaf
	TagName string // Set this to a tag name to get distinct values for that tag
}

func CompileLog

func CompileLog(root LogAST) (LQueryPlan, error)

type LRangeAggNode

type LRangeAggNode struct {
	// Range aggregation over logs (rate/bytes_rate/count_over_time/…)
	Op    string
	Param *float64
	Child LExecNode // should be an LLeafNode or a subtree that ends at a leaf
}

type LScalarNode

type LScalarNode struct {
	Value float64
}

type LabelFilter

type LabelFilter struct {
	Label string  `json:"label"`
	Op    MatchOp `json:"op"` // =, !=, =~, !~
	Value string  `json:"value"`

	// Where did this appear in the pipeline?
	AfterParser bool `json:"afterParser,omitempty"` // true if it appeared after some parser
	ParserIdx   *int `json:"parserIdx,omitempty"`   // index in LogSelector.Parsers (0-based), if AfterParser
}

type LabelFormatExpr

type LabelFormatExpr struct {
	Out  string `json:"out"`            // target label/column name
	Tmpl string `json:"tmpl,omitempty"` // original template (normalized, optional for debug)
	SQL  string `json:"sql"`            // compiled DuckDB SQL expression
}

type LabelMatch

type LabelMatch struct {
	Label string  `json:"label"`
	Op    MatchOp `json:"op"`
	Value string  `json:"value"`
}

type LineFilter

type LineFilter struct {
	Op        LineFilterOp `json:"op"`
	Match     string       `json:"match"`
	ParserIdx *int         `json:"parserIdx,omitempty"`
}

type LineFilterOp

type LineFilterOp string
const (
	LineContains    LineFilterOp = "contains"     // |= "foo"
	LineNotContains LineFilterOp = "not_contains" // != "foo"
	LineRegex       LineFilterOp = "regex"        // |~ "re"
	LineNotRegex    LineFilterOp = "not_regex"    // !~ "re"
)

type LogAST

type LogAST struct {
	Kind      Kind         `json:"kind"`
	LogSel    *LogSelector `json:"logSelector,omitempty"`
	LogRange  *LogRange    `json:"logRange,omitempty"`
	RangeAgg  *RangeAgg    `json:"rangeAgg,omitempty"`
	Vector    *Vector      `json:"vector,omitempty"`
	VectorAgg *VectorAgg   `json:"vectorAgg,omitempty"`
	BinOp     *BinOp       `json:"binop,omitempty"`
	Scalar    *float64     `json:"scalar,omitempty"`
	Raw       string       `json:"raw"`
}

func FromLogQL

func FromLogQL(input string) (LogAST, error)

FromLogQL parses input with the official Loki LogQL parser and returns a simplified LogAST.

func (*LogAST) CollectPipelines

func (a *LogAST) CollectPipelines() []*LogRange

CollectPipelines returns every pipeline (LogRange) in evaluation order (left→right).

func (*LogAST) FirstPipeline

func (a *LogAST) FirstPipeline() (*LogSelector, *LogRange, bool)

FirstPipeline is a generic variant that can start from any AST node.

func (*LogAST) IsAggregateExpr added in v1.3.4

func (a *LogAST) IsAggregateExpr() bool

type LogLeaf

type LogLeaf struct {
	ID string `json:"id"`

	// Pipeline (left→right) captured at selector level
	Matchers     []LabelMatch  `json:"matchers,omitempty"`
	LineFilters  []LineFilter  `json:"lineFilters,omitempty"`
	LabelFilters []LabelFilter `json:"labelFilters,omitempty"`
	Parsers      []ParserStage `json:"parsers,omitempty"`

	// Time shape attached to this pipeline (from enclosing LogRange)
	Range  string `json:"range,omitempty"`  // e.g. "5m"
	Offset string `json:"offset,omitempty"` // e.g. "1m"
	Unwrap bool   `json:"unwrap,omitempty"` // rare, but carry it

	// If this leaf is the input to a range aggregation, note the op
	RangeAggOp string   `json:"rangeAggOp,omitempty"` // e.g. "count_over_time", "rate"
	RangeParam *float64 `json:"rangeParam,omitempty"`

	// Optional: the immediate vector-agg goal above this leaf (helps worker decide grouping)
	OutBy      []string `json:"outBy,omitempty"`
	OutWithout []string `json:"outWithout,omitempty"`
}

func (*LogLeaf) IsSimpleAggregation added in v1.7.1

func (be *LogLeaf) IsSimpleAggregation() bool

IsSimpleAggregation returns true if this leaf can use an optimized flat SQL query for aggregation (no CTE pipeline needed). This is the case when all of the following hold:

  - No parsers (json, regexp, logfmt, label_format, unwrap, etc.)
  - No line filters (which require scanning log_message content)
  - No label filters (which might depend on parsed labels)

For simple aggregations like count_over_time grouped by existing columns, we can generate a single SELECT with GROUP BY that only reads the columns we actually need, rather than SELECT * through a CTE pipeline.

func (*LogLeaf) Label

func (be *LogLeaf) Label() string

func (*LogLeaf) ToSpansWorkerSQL added in v1.4.0

func (be *LogLeaf) ToSpansWorkerSQL(limit int, order string, fields []string) string

ToSpansWorkerSQL generates SQL for spans queries with span_name and span_kind as default fields

func (*LogLeaf) ToSpansWorkerSQLWithLimit added in v1.4.0

func (be *LogLeaf) ToSpansWorkerSQLWithLimit(limit int, order string, fields []string) string

ToSpansWorkerSQLWithLimit is a convenience wrapper for ToSpansWorkerSQL

func (*LogLeaf) ToWorkerSQL

func (be *LogLeaf) ToWorkerSQL(limit int, order string, fields []string) string

func (*LogLeaf) ToWorkerSQLForTagValues

func (be *LogLeaf) ToWorkerSQLForTagValues(tagName string) string

func (*LogLeaf) ToWorkerSQLWithLimit

func (be *LogLeaf) ToWorkerSQLWithLimit(limit int, order string, fields []string) string

ToWorkerSQLWithLimit is like ToWorkerSQL but (for non-aggregated leaves) appends a LIMIT clause using the given limit.

type LogRange

type LogRange struct {
	Selector LogSelector `json:"selector"`
	Range    string      `json:"range"`  // e.g. "5m"
	Offset   string      `json:"offset"` // e.g. "1m"
	Unwrap   bool        `json:"unwrap"`
}

type LogSelector

type LogSelector struct {
	Matchers     []LabelMatch  `json:"matchers"`
	LineFilters  []LineFilter  `json:"lineFilters,omitempty"`
	LabelFilters []LabelFilter `json:"labelFilters,omitempty"`
	Parsers      []ParserStage `json:"parsers,omitempty"` // json, logfmt, regexp, label_format, keep/drop, unwrap, etc.
}

type MatchOp

type MatchOp string
const (
	MatchEq  MatchOp = "="
	MatchNe  MatchOp = "!="
	MatchRe  MatchOp = "=~"
	MatchNre MatchOp = "!~"
	MatchGt  MatchOp = ">"
	MatchGte MatchOp = ">="
	MatchLt  MatchOp = "<"
	MatchLte MatchOp = "<="
)

type ParserStage

type ParserStage struct {
	Type         string            `json:"type"`                   // json|logfmt|regexp|label_format|keep_labels|...
	Params       map[string]string `json:"params"`                 // optional (e.g. regexp pattern)
	Filters      []LabelFilter     `json:"filters,omitempty"`      // label filters that follow this parser
	LabelFormats []LabelFormatExpr `json:"labelFormats,omitempty"` // ONLY for label_format
}

func MergeConsecutiveKeyParsers added in v1.4.2

func MergeConsecutiveKeyParsers(parsers []ParserStage) []ParserStage

MergeConsecutiveKeyParsers merges adjacent json/logfmt stages by unioning their Params.

type RangeAgg

type RangeAgg struct {
	Op      string   `json:"op"`              // rate, count_over_time, bytes_rate, quantile_over_time, ...
	Param   *float64 `json:"param,omitempty"` // e.g. 0.99
	By      []string `json:"by,omitempty"`
	Without []string `json:"without,omitempty"`
	Left    LogRange `json:"left"`
}

type Vector

type Vector struct {
	Literal *float64 `json:"literal,omitempty"`
}

type VectorAgg

type VectorAgg struct {
	Op      string   `json:"op"`
	Param   *int     `json:"param,omitempty"` // e.g. 3 for topk(3, ...)
	By      []string `json:"by,omitempty"`
	Without []string `json:"without,omitempty"`
	Left    LogAST   `json:"left"` // vector or sample expression
}