Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25)
  26from sqlglot.helper import flatten, is_float, is_int, seq_get
  27from sqlglot.tokens import TokenType
  28
  29if t.TYPE_CHECKING:
  30    from sqlglot._typing import E
  31
  32
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP* family.

    Args:
        name: original function name, used for the opaque ``Anonymous`` fallback.
        kind: target type the function converts to.
        safe: True for the TRY_* variants (propagated on the DATE path).
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # An integer-looking argument is an epoch value, not a formatted datetime
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't confidently transpile stays as an opaque call
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  62
  63
  64def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  65    expression = parser.build_var_map(args)
  66
  67    if isinstance(expression, exp.StarMap):
  68        return expression
  69
  70    return exp.Struct(
  71        expressions=[
  72            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  73        ]
  74    )
  75
  76
  77def _build_datediff(args: t.List) -> exp.DateDiff:
  78    return exp.DateDiff(
  79        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  80    )
  81
  82
  83def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  84    def _builder(args: t.List) -> E:
  85        return expr_type(
  86            this=seq_get(args, 2),
  87            expression=seq_get(args, 1),
  88            unit=map_date_part(seq_get(args, 0)),
  89        )
  90
  91    return _builder
  92
  93
  94# https://docs.snowflake.com/en/sql-reference/functions/div0
  95def _build_if_from_div0(args: t.List) -> exp.If:
  96    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  97    true = exp.Literal.number(0)
  98    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  99    return exp.If(this=cond, true=true, false=false)
 100
 101
 102# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 103def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 104    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 105    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 106
 107
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    # NULLIFZERO(x): NULL when x = 0, otherwise x
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 112
 113
 114def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 115    flag = expression.text("flag")
 116
 117    if "i" not in flag:
 118        flag += "i"
 119
 120    return self.func(
 121        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 122    )
 123
 124
 125def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 126    if len(args) == 3:
 127        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 128    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 129
 130
 131def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 132    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 133
 134    if not regexp_replace.args.get("replacement"):
 135        regexp_replace.set("replacement", exp.Literal.string(""))
 136
 137    return regexp_replace
 138
 139
 140def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 141    def _parse(self: Snowflake.Parser) -> exp.Show:
 142        return self._parse_show_snowflake(*args, **kwargs)
 143
 144    return _parse
 145
 146
def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    # Build the generic truncation node, then normalize the Snowflake-specific
    # date-part spelling (via map_date_part) on its "unit" argument.
    trunc = date_trunc_to_time(args)
    trunc.set("unit", map_date_part(trunc.args["unit"]))
    return trunc
 151
 152
 153def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 154    """
 155    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 156    so we need to unqualify them.
 157
 158    Example:
 159        >>> from sqlglot import parse_one
 160        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 161        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 162        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 163    """
 164    if isinstance(expression, exp.Pivot) and expression.unpivot:
 165        expression = transforms.unqualify_columns(expression)
 166
 167    return expression
 168
 169
 170def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 171    assert isinstance(expression, exp.Create)
 172
 173    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 174        if expression.this in exp.DataType.NESTED_TYPES:
 175            expression.set("expressions", None)
 176        return expression
 177
 178    props = expression.args.get("properties")
 179    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 180        for schema_expression in expression.this.expressions:
 181            if isinstance(schema_expression, exp.ColumnDef):
 182                column_type = schema_expression.kind
 183                if isinstance(column_type, exp.DataType):
 184                    column_type.transform(_flatten_structured_type, copy=False)
 185
 186    return expression
 187
 188
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # Sample sizes are percentages, not row counts
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Snowflake time-format elements mapped to strftime-style directives.
    # NOTE(review): lowercase "dd" maps to "%-d" while "DD" maps to "%d", and
    # "dy" maps to "%w" while "DY" maps to "%a" — confirm the asymmetry is intentional.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 228
 229    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 230        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 231        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 232        if (
 233            isinstance(expression, exp.Identifier)
 234            and isinstance(expression.parent, exp.Table)
 235            and expression.name.lower() == "dual"
 236        ):
 237            return expression  # type: ignore
 238
 239        return super().quote_identifier(expression, identify=identify)
 240
    class Parser(parser.Parser):
        # Strings inside PIVOT(... IN (...)) are treated as identifiers
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `a:b` performs JSON extraction rather than slicing
        COLON_IS_JSON_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped onto canonical AST builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Argument order is swapped relative to the AST node
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is the 0.5 continuous percentile
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed through the generic function path instead of the
        # base parser's special-cased handler
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] a, b, c
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <object kind> sub-parsers; TERSE variants share the same handler
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            # All four keywords funnel into the same [WITH] ... constraint handler
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of FLATTEN, used to alias LATERAL FLATTEN
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose bare IN <name> scope refers to a schema
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Typed lambda params arrive as Casts; unwrap to the bare names
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 413
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse column constraints of the form [WITH] MASKING POLICY / PROJECTION POLICY / TAG."""
            if self._prev.token_type != TokenType.WITH:
                # Entered via MASKING/PROJECTION/TAG directly (no WITH); step back one
                # token so the keyword sequences below can re-match it.
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Normalize a qualified policy name (parsed as a Column) to a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 439
 440        def _parse_create(self) -> exp.Create | exp.Command:
 441            expression = super()._parse_create()
 442            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 443                # Replace the Table node with the enclosed Identifier
 444                expression.this.replace(expression.this.this)
 445
 446            return expression
 447
 448        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 449        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 450        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 451            this = self._parse_var() or self._parse_type()
 452
 453            if not this:
 454                return None
 455
 456            self._match(TokenType.COMMA)
 457            expression = self._parse_bitwise()
 458            this = map_date_part(this)
 459            name = this.name.upper()
 460
 461            if name.startswith("EPOCH"):
 462                if name == "EPOCH_MILLISECOND":
 463                    scale = 10**3
 464                elif name == "EPOCH_MICROSECOND":
 465                    scale = 10**6
 466                elif name == "EPOCH_NANOSECOND":
 467                    scale = 10**9
 468                else:
 469                    scale = None
 470
 471                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 472                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 473
 474                if scale:
 475                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 476
 477                return to_unix
 478
 479            return self.expression(exp.Extract, this=this, expression=expression)
 480
 481        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 482            if is_map:
 483                # Keys are strings in Snowflake's objects, see also:
 484                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 485                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 486                return self._parse_slice(self._parse_string())
 487
 488            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 489
 490        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 491            lateral = super()._parse_lateral()
 492            if not lateral:
 493                return lateral
 494
 495            if isinstance(lateral.this, exp.Explode):
 496                table_alias = lateral.args.get("alias")
 497                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 498                if table_alias and not table_alias.args.get("columns"):
 499                    table_alias.set("columns", columns)
 500                elif not table_alias:
 501                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 502
 503            return lateral
 504
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            # Attaches a time-travel clause, AT|BEFORE (<kind> => <expr>), to `table`.
            index = self._index  # remember position so we can backtrack on a partial match
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # `kind` ends up falsy when the paren or a recognized
                # historical-data kind keyword is missing
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Incomplete AT/BEFORE clause — rewind and leave the table untouched
                    self._retreat(index)

            return table
 527
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            # https://docs.snowflake.com/en/user-guide/querying-stage
            # A "table" may also be a staged file: a string literal or an @stage
            # path, optionally wrapped as (FILE_FORMAT => ..., PATTERN => ...).
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT accepts a string or a reference to a named format
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            # Either form may be followed by a time-travel AT/BEFORE clause
            return self._parse_at_before(table)
 561
 562        def _parse_id_var(
 563            self,
 564            any_token: bool = True,
 565            tokens: t.Optional[t.Collection[TokenType]] = None,
 566        ) -> t.Optional[exp.Expression]:
 567            if self._match_text_seq("IDENTIFIER", "("):
 568                identifier = (
 569                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 570                    or self._parse_string()
 571                )
 572                self._match_r_paren()
 573                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 574
 575            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 576
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement: HISTORY, LIKE, IN scope, STARTS WITH, LIMIT, FROM."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # e.g. IN DATABASE foo — the creatable keyword names the scope kind
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare `IN <name>`: infer schema vs table scope from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 614
 615        def _parse_alter_table_swap(self) -> exp.SwapTable:
 616            self._match_text_seq("WITH")
 617            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 618
 619        def _parse_location_property(self) -> exp.LocationProperty:
 620            self._match(TokenType.EQ)
 621            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 622
 623        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 624            # Parse either a subquery or a staged file
 625            return (
 626                self._parse_select(table=True, parse_subquery_alias=False)
 627                if self._match(TokenType.L_PAREN, advance=False)
 628                else self._parse_table_parts()
 629            )
 630
 631        def _parse_location_path(self) -> exp.Var:
 632            parts = [self._advance_any(ignore_reserved=True)]
 633
 634            # We avoid consuming a comma token because external tables like @foo and @bar
 635            # can be joined in a query with a comma separator, as well as closing paren
 636            # in case of subqueries
 637            while self._is_connected() and not self._match_set(
 638                (TokenType.COMMA, TokenType.R_PAREN), advance=False
 639            ):
 640                parts.append(self._advance_any(ignore_reserved=True))
 641
 642            return exp.var("".join(part.text for part in parts if part))
 643
 644        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 645            this = super()._parse_lambda_arg()
 646
 647            if not this:
 648                return this
 649
 650            typ = self._parse_types()
 651
 652            if typ:
 653                return self.expression(exp.Cast, this=this, to=typ)
 654
 655            return this
 656
    class Tokenizer(tokens.Tokenizer):
        # A quote escapes itself ('') in addition to backslash escapes
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$-delimited strings are taken verbatim
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage-management statements are passed through as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # `/*+` is not tokenized as a hint opener
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a statement rather than passed through as a command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 698
    class Generator(generator.Generator):
        # Token used when rendering parameters, e.g. $1.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"

        # Per-node SQL rendering overrides for Snowflake.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (needle, haystack), i.e. reversed argument order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive, hence the +1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path components expressible through GET_PATH.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        # Properties that have no Snowflake rendering at all.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Constructs that cannot appear inside a VALUES table literal; see
        # values_sql, which disables the table form when any of these occur.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 834        def with_properties(self, properties: exp.Properties) -> str:
 835            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 836
 837        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 838            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 839                values_as_table = False
 840
 841            return super().values_sql(expression, values_as_table=values_as_table)
 842
 843        def datatype_sql(self, expression: exp.DataType) -> str:
 844            expressions = expression.expressions
 845            if (
 846                expressions
 847                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 848                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 849            ):
 850                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 851                return "OBJECT"
 852
 853            return super().datatype_sql(expression)
 854
 855        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 856            return self.func(
 857                "TO_NUMBER",
 858                expression.this,
 859                expression.args.get("format"),
 860                expression.args.get("precision"),
 861                expression.args.get("scale"),
 862            )
 863
 864        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 865            milli = expression.args.get("milli")
 866            if milli is not None:
 867                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 868                expression.set("nano", milli_to_nano)
 869
 870            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 871
 872        def trycast_sql(self, expression: exp.TryCast) -> str:
 873            value = expression.this
 874
 875            if value.type is None:
 876                from sqlglot.optimizer.annotate_types import annotate_types
 877
 878                value = annotate_types(value)
 879
 880            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 881                return super().trycast_sql(expression)
 882
 883            # TRY_CAST only works for string values in Snowflake
 884            return self.cast_sql(expression)
 885
 886        def log_sql(self, expression: exp.Log) -> str:
 887            if not expression.expression:
 888                return self.func("LN", expression.this)
 889
 890            return super().log_sql(expression)
 891
 892        def unnest_sql(self, expression: exp.Unnest) -> str:
 893            unnest_alias = expression.args.get("alias")
 894            offset = expression.args.get("offset")
 895
 896            columns = [
 897                exp.to_identifier("seq"),
 898                exp.to_identifier("key"),
 899                exp.to_identifier("path"),
 900                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 901                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 902                or exp.to_identifier("value"),
 903                exp.to_identifier("this"),
 904            ]
 905
 906            if unnest_alias:
 907                unnest_alias.set("columns", columns)
 908            else:
 909                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 910
 911            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 912            alias = self.sql(unnest_alias)
 913            alias = f" AS {alias}" if alias else ""
 914            return f"{explode}{alias}"
 915
 916        def show_sql(self, expression: exp.Show) -> str:
 917            terse = "TERSE " if expression.args.get("terse") else ""
 918            history = " HISTORY" if expression.args.get("history") else ""
 919            like = self.sql(expression, "like")
 920            like = f" LIKE {like}" if like else ""
 921
 922            scope = self.sql(expression, "scope")
 923            scope = f" {scope}" if scope else ""
 924
 925            scope_kind = self.sql(expression, "scope_kind")
 926            if scope_kind:
 927                scope_kind = f" IN {scope_kind}"
 928
 929            starts_with = self.sql(expression, "starts_with")
 930            if starts_with:
 931                starts_with = f" STARTS WITH {starts_with}"
 932
 933            limit = self.sql(expression, "limit")
 934
 935            from_ = self.sql(expression, "from")
 936            if from_:
 937                from_ = f" FROM {from_}"
 938
 939            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 940
 941        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 942            # Other dialects don't support all of the following parameters, so we need to
 943            # generate default values as necessary to ensure the transpilation is correct
 944            group = expression.args.get("group")
 945            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 946            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 947            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 948
 949            return self.func(
 950                "REGEXP_SUBSTR",
 951                expression.this,
 952                expression.expression,
 953                position,
 954                occurrence,
 955                parameters,
 956                group,
 957            )
 958
 959        def except_op(self, expression: exp.Except) -> str:
 960            if not expression.args.get("distinct"):
 961                self.unsupported("EXCEPT with All is not supported in Snowflake")
 962            return super().except_op(expression)
 963
 964        def intersect_op(self, expression: exp.Intersect) -> str:
 965            if not expression.args.get("distinct"):
 966                self.unsupported("INTERSECT with All is not supported in Snowflake")
 967            return super().intersect_op(expression)
 968
 969        def describe_sql(self, expression: exp.Describe) -> str:
 970            # Default to table if kind is unknown
 971            kind_value = expression.args.get("kind") or "TABLE"
 972            kind = f" {kind_value}" if kind_value else ""
 973            this = f" {self.sql(expression, 'this')}"
 974            expressions = self.expressions(expression, flat=True)
 975            expressions = f" {expressions}" if expressions else ""
 976            return f"DESCRIBE{kind}{this}{expressions}"
 977
 978        def generatedasidentitycolumnconstraint_sql(
 979            self, expression: exp.GeneratedAsIdentityColumnConstraint
 980        ) -> str:
 981            start = expression.args.get("start")
 982            start = f" START {start}" if start else ""
 983            increment = expression.args.get("increment")
 984            increment = f" INCREMENT {increment}" if increment else ""
 985            return f"AUTOINCREMENT{start}{increment}"
 986
 987        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 988            this = self.sql(expression, "this")
 989            return f"SWAP WITH {this}"
 990
 991        def cluster_sql(self, expression: exp.Cluster) -> str:
 992            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 993
 994        def struct_sql(self, expression: exp.Struct) -> str:
 995            keys = []
 996            values = []
 997
 998            for i, e in enumerate(expression.expressions):
 999                if isinstance(e, exp.PropertyEQ):
1000                    keys.append(
1001                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1002                    )
1003                    values.append(e.expression)
1004                else:
1005                    keys.append(exp.Literal.string(f"_{i}"))
1006                    values.append(e)
1007
1008            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1009
1010        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1011            if expression.args.get("weight") or expression.args.get("accuracy"):
1012                self.unsupported(
1013                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1014                )
1015
1016            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1017
1018        def alterset_sql(self, expression: exp.AlterSet) -> str:
1019            exprs = self.expressions(expression, flat=True)
1020            exprs = f" {exprs}" if exprs else ""
1021            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1022            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1023            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1024            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1025            tag = self.expressions(expression, key="tag", flat=True)
1026            tag = f" TAG {tag}" if tag else ""
1027
1028            return f"SET{exprs}{file_format}{copy_options}{tag}"
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Snowflake time-format tokens -> strftime-style directives, both cases.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        # NOTE(review): "DD" maps to zero-padded %d but lowercase "dd" maps to
        # unpadded %-d — confirm this asymmetry is intentional.
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 229
 230    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 231        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 232        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 233        if (
 234            isinstance(expression, exp.Identifier)
 235            and isinstance(expression.parent, exp.Table)
 236            and expression.name.lower() == "dual"
 237        ):
 238            return expression  # type: ignore
 239
 240        return super().quote_identifier(expression, identify=identify)
 241
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_JSON_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake-specific function name -> AST builder overrides.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # ARRAY_CONTAINS(needle, haystack): arguments are swapped vs the AST node.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is the 0.5 percentile.
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a plain function call in Snowflake.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> sub-parsers; TERSE variants share the same handler.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged file path (e.g. @stage/dir/file.csv).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns produced by FLATTEN, in order.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN scope defaults to SCHEMA rather than TABLE.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds that name a bare identifier rather than a table.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip casts from lambda parameters, keeping the bare identifiers.
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 414
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column
            constraints; returns ``None`` when none of them match."""
            if self._prev.token_type != TokenType.WITH:
                # Entered via a bare MASKING/PROJECTION/TAG keyword (no WITH);
                # step back so the matchers below can consume that keyword.
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Qualified policy names are normalized to dotted identifiers.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 440
 441        def _parse_create(self) -> exp.Create | exp.Command:
 442            expression = super()._parse_create()
 443            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 444                # Replace the Table node with the enclosed Identifier
 445                expression.this.replace(expression.this.this)
 446
 447            return expression
 448
 449        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 450        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 451        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 452            this = self._parse_var() or self._parse_type()
 453
 454            if not this:
 455                return None
 456
 457            self._match(TokenType.COMMA)
 458            expression = self._parse_bitwise()
 459            this = map_date_part(this)
 460            name = this.name.upper()
 461
 462            if name.startswith("EPOCH"):
 463                if name == "EPOCH_MILLISECOND":
 464                    scale = 10**3
 465                elif name == "EPOCH_MICROSECOND":
 466                    scale = 10**6
 467                elif name == "EPOCH_NANOSECOND":
 468                    scale = 10**9
 469                else:
 470                    scale = None
 471
 472                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 473                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 474
 475                if scale:
 476                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 477
 478                return to_unix
 479
 480            return self.expression(exp.Extract, this=this, expression=expression)
 481
 482        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 483            if is_map:
 484                # Keys are strings in Snowflake's objects, see also:
 485                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 486                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 487                return self._parse_slice(self._parse_string())
 488
 489            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 490
 491        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 492            lateral = super()._parse_lateral()
 493            if not lateral:
 494                return lateral
 495
 496            if isinstance(lateral.this, exp.Explode):
 497                table_alias = lateral.args.get("alias")
 498                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 499                if table_alias and not table_alias.args.get("columns"):
 500                    table_alias.set("columns", columns)
 501                elif not table_alias:
 502                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 503
 504            return lateral
 505
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE time-travel clause and attach it to
            ``table`` as its "when" arg; rewinds if the clause is incomplete."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # Expected shape: AT|BEFORE ( <kind> => <expr> ), where <kind> is
                # one of HISTORICAL_DATA_KIND.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete time-travel clause - restore the position.
                    self._retreat(index)

            return table
 528
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            A string literal or an @stage path is treated as a staged file, which
            may carry (FILE_FORMAT => ..., PATTERN => ...) options; anything else
            falls back to the standard parser. A trailing AT/BEFORE time-travel
            clause is parsed in both cases.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (qualified) format name.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)
 562
 563        def _parse_id_var(
 564            self,
 565            any_token: bool = True,
 566            tokens: t.Optional[t.Collection[TokenType]] = None,
 567        ) -> t.Optional[exp.Expression]:
 568            if self._match_text_seq("IDENTIFIER", "("):
 569                identifier = (
 570                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 571                    or self._parse_string()
 572                )
 573                self._match_r_paren()
 574                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 575
 576            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 577
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a Snowflake SHOW command into an exp.Show node.

            `this` is the object kind being shown (e.g. "TABLES", "SCHEMAS").
            NOTE: the keyword arguments below are built in dict-literal order on
            purpose — each value expression consumes tokens as a side effect.
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope kind: infer SCHEMA vs TABLE from the object type
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 615
 616        def _parse_alter_table_swap(self) -> exp.SwapTable:
 617            self._match_text_seq("WITH")
 618            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 619
 620        def _parse_location_property(self) -> exp.LocationProperty:
 621            self._match(TokenType.EQ)
 622            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 623
 624        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 625            # Parse either a subquery or a staged file
 626            return (
 627                self._parse_select(table=True, parse_subquery_alias=False)
 628                if self._match(TokenType.L_PAREN, advance=False)
 629                else self._parse_table_parts()
 630            )
 631
 632        def _parse_location_path(self) -> exp.Var:
 633            parts = [self._advance_any(ignore_reserved=True)]
 634
 635            # We avoid consuming a comma token because external tables like @foo and @bar
 636            # can be joined in a query with a comma separator, as well as closing paren
 637            # in case of subqueries
 638            while self._is_connected() and not self._match_set(
 639                (TokenType.COMMA, TokenType.R_PAREN), advance=False
 640            ):
 641                parts.append(self._advance_any(ignore_reserved=True))
 642
 643            return exp.var("".join(part.text for part in parts if part))
 644
 645        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 646            this = super()._parse_lambda_arg()
 647
 648            if not this:
 649                return this
 650
 651            typ = self._parse_types()
 652
 653            if typ:
 654                return self.expression(exp.Cast, this=this, to=typ)
 655
 656            return this
 657
    class Tokenizer(tokens.Tokenizer):
        # Snowflake-specific lexer settings.
        STRING_ESCAPES = ["\\", "'"]  # backslash or a doubled quote escapes inside strings
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]  # dollar-quoted raw strings
        COMMENTS = ["--", "//", ("/*", "*/")]  # Snowflake also supports // line comments

        # Snowflake keywords mapped onto generic token types.
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake does not use the /*+ ... */ optimizer-hint comment syntax
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed structurally (see Parser above), not treated as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 699
    class Generator(generator.Generator):
        # Snowflake-specific SQL generation flags.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"

        # Expression-node -> SQL renderers overriding the generic generator.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (element, array), i.e. args reversed vs. the AST
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE is end-exclusive, so add 1 to the inclusive AST end
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression kinds that may not appear inside a VALUES clause (see values_sql).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render table properties space-separated and unwrapped (no parentheses)."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Generate a VALUES clause, falling back to the non-table form when any
            row contains an expression kind Snowflake's VALUES clause can't hold."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing struct types with typed fields to OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format, precision and scale args."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # TIMESTAMP_FROM_PARTS takes nanoseconds, so scale ms by 1e6
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to a plain CAST for non-string operands."""
            value = expression.this

            # Annotate lazily so we only pay for type inference when needed
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render single-argument LOG as the natural logarithm LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) with a
            column alias list matching FLATTEN's output columns."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # Positional aliases for FLATTEN's output: seq, key, path, index,
            # value, this — mapping any user-supplied offset/value aliases in place
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW command from an exp.Show node."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render RegexpExtract as REGEXP_SUBSTR, filling in defaults for any
            optional argument that precedes one the caller supplied."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            """Render EXCEPT, warning that Snowflake has no EXCEPT ALL."""
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            """Render INTERSECT, warning that Snowflake has no INTERSECT ALL."""
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE <kind> <object>, defaulting the kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render the SWAP WITH clause of ALTER TABLE."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key as CLUSTER BY (expr, ...)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, key2, value2, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys: "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE, warning on unsupported weight/accuracy args."""
            if expression.args.get("weight") or expression.args.get("accuracy"):
                self.unsupported(
                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
                )

            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render the SET clause of ALTER, including stage file format,
            stage copy options and TAG assignments."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
230    def quote_identifier(self, expression: E, identify: bool = True) -> E:
231        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
232        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
233        if (
234            isinstance(expression, exp.Identifier)
235            and isinstance(expression.parent, exp.Table)
236            and expression.name.lower() == "dual"
237        ):
238            return expression  # type: ignore
239
240        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. "\\n") to its unescaped version (the actual newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Parser(parser.Parser):
    """Parser for the Snowflake SQL dialect.

    Extends the base parser with Snowflake-specific function mappings
    (TO_* conversions, FLATTEN, OBJECT_CONSTRUCT, ...), SHOW-statement
    parsing, staged-file table references and column-level policies.
    """

    # Treat string literals in PIVOT IN-lists as identifiers.
    IDENTIFY_PIVOT_STRINGS = True

    DEFAULT_SAMPLING_METHOD = "BERNOULLI"

    # In Snowflake, `col:path` is JSON extraction on semi-structured data.
    COLON_IS_JSON_EXTRACT = True

    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        TokenType.MATCH_CONDITION,
    }

    # WINDOW is a valid table alias in Snowflake; MATCH_CONDITION is not.
    TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
    TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

    # Snowflake function name -> sqlglot expression builder.
    FUNCTIONS = {
        **parser.Parser.FUNCTIONS,
        "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
        "ARRAYAGG": exp.ArrayAgg.from_arg_list,
        "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
        # Snowflake's argument order is (array, value); exp.ArrayContains
        # stores the array as `this`, hence the swap.
        "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
            this=seq_get(args, 1), expression=seq_get(args, 0)
        ),
        "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
            start=seq_get(args, 0),
            end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
            step=seq_get(args, 2),
        ),
        "BITXOR": binary_from_function(exp.BitwiseXor),
        "BIT_XOR": binary_from_function(exp.BitwiseXor),
        "BOOLXOR": binary_from_function(exp.Xor),
        "CONVERT_TIMEZONE": _build_convert_timezone,
        "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
        "DATE_TRUNC": _date_trunc_to_time,
        "DATEADD": _build_date_time_add(exp.DateAdd),
        "DATEDIFF": _build_datediff,
        "DIV0": _build_if_from_div0,
        "FLATTEN": exp.Explode.from_arg_list,
        "GET_PATH": lambda args, dialect: exp.JSONExtract(
            this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
        ),
        "IFF": exp.If.from_arg_list,
        "LAST_DAY": lambda args: exp.LastDay(
            this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
        ),
        "LISTAGG": exp.GroupConcat.from_arg_list,
        # MEDIAN(x) is the 0.5 continuous percentile.
        "MEDIAN": lambda args: exp.PercentileCont(
            this=seq_get(args, 0), expression=exp.Literal.number(0.5)
        ),
        "NULLIFZERO": _build_if_from_nullifzero,
        "OBJECT_CONSTRUCT": _build_object_construct,
        "REGEXP_REPLACE": _build_regexp_replace,
        "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
        "RLIKE": exp.RegexpLike.from_arg_list,
        "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
        "TIMEADD": _build_date_time_add(exp.TimeAdd),
        "TIMEDIFF": _build_datediff,
        "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
        "TIMESTAMPDIFF": _build_datediff,
        "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
        "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
        "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
        "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
        "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
        "TO_NUMBER": lambda args: exp.ToNumber(
            this=seq_get(args, 0),
            format=seq_get(args, 1),
            precision=seq_get(args, 2),
            scale=seq_get(args, 3),
        ),
        "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
        "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
        "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
        "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
        "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
        "TO_VARCHAR": exp.ToChar.from_arg_list,
        "ZEROIFNULL": _build_if_from_zeroifnull,
    }

    FUNCTION_PARSERS = {
        **parser.Parser.FUNCTION_PARSERS,
        "DATE_PART": lambda self: self._parse_date_part(),
        "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
    }
    # Snowflake's TRIM takes plain arguments, not the TRIM(x FROM y) form.
    FUNCTION_PARSERS.pop("TRIM")

    # TIME is not a timestamp-producing keyword in Snowflake.
    TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

    RANGE_PARSERS = {
        **parser.Parser.RANGE_PARSERS,
        TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
        TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
    }

    ALTER_PARSERS = {
        **parser.Parser.ALTER_PARSERS,
        # ALTER ... UNSET [TAG] a, b, c
        "UNSET": lambda self: self.expression(
            exp.Set,
            tag=self._match_text_seq("TAG"),
            expressions=self._parse_csv(self._parse_id_var),
            unset=True,
        ),
        "SWAP": lambda self: self._parse_alter_table_swap(),
    }

    STATEMENT_PARSERS = {
        **parser.Parser.STATEMENT_PARSERS,
        TokenType.SHOW: lambda self: self._parse_show(),
    }

    PROPERTY_PARSERS = {
        **parser.Parser.PROPERTY_PARSERS,
        "LOCATION": lambda self: self._parse_location_property(),
    }

    TYPE_CONVERTERS = {
        # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
        exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
    }

    # SHOW <kind> variants; TERSE entries reuse the same parser.
    SHOW_PARSERS = {
        "SCHEMAS": _show_parser("SCHEMAS"),
        "TERSE SCHEMAS": _show_parser("SCHEMAS"),
        "OBJECTS": _show_parser("OBJECTS"),
        "TERSE OBJECTS": _show_parser("OBJECTS"),
        "TABLES": _show_parser("TABLES"),
        "TERSE TABLES": _show_parser("TABLES"),
        "VIEWS": _show_parser("VIEWS"),
        "TERSE VIEWS": _show_parser("VIEWS"),
        "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
        "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
        "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
        "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
        "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
        "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
        "SEQUENCES": _show_parser("SEQUENCES"),
        "TERSE SEQUENCES": _show_parser("SEQUENCES"),
        "COLUMNS": _show_parser("COLUMNS"),
        "USERS": _show_parser("USERS"),
        "TERSE USERS": _show_parser("USERS"),
    }

    # All of these keywords can begin a column-policy/tag constraint;
    # _parse_with_constraint handles the dispatch.
    CONSTRAINT_PARSERS = {
        **parser.Parser.CONSTRAINT_PARSERS,
        "WITH": lambda self: self._parse_with_constraint(),
        "MASKING": lambda self: self._parse_with_constraint(),
        "PROJECTION": lambda self: self._parse_with_constraint(),
        "TAG": lambda self: self._parse_with_constraint(),
    }

    # Punctuation tokens that may appear inside a staged file path (@stage/dir).
    STAGED_FILE_SINGLE_TOKENS = {
        TokenType.DOT,
        TokenType.MOD,
        TokenType.SLASH,
    }

    # Canonical output columns of LATERAL FLATTEN(...).
    FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

    # SHOW kinds whose unqualified IN-scope defaults to SCHEMA (vs TABLE).
    SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

    # CREATE kinds whose target is a bare identifier, not a table.
    NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

    LAMBDAS = {
        **parser.Parser.LAMBDAS,
        TokenType.ARROW: lambda self, expressions: self.expression(
            exp.Lambda,
            this=self._replace_lambda(
                self._parse_assignment(),
                expressions,
            ),
            # Typed lambda args parse as casts; keep only the bare identifier.
            expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
        ),
    }
def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
    """Parse a Snowflake column constraint introduced by WITH:
    MASKING POLICY, PROJECTION POLICY, or TAG.

    Returns the matching constraint expression, or None when the
    upcoming tokens match none of these forms.
    """
    # This parser is registered under WITH, MASKING, PROJECTION and TAG.
    # When the trigger keyword was not WITH itself, step back one token so
    # the keyword can be re-consumed by the matches below.
    if self._prev.token_type != TokenType.WITH:
        self._retreat(self._index - 1)

    if self._match_text_seq("MASKING", "POLICY"):
        policy = self._parse_column()
        return self.expression(
            exp.MaskingPolicyColumnConstraint,
            # A qualified policy name parses as a Column; normalize to a Dot chain
            this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
            expressions=self._match(TokenType.USING)
            and self._parse_wrapped_csv(self._parse_id_var),
        )
    if self._match_text_seq("PROJECTION", "POLICY"):
        policy = self._parse_column()
        return self.expression(
            exp.ProjectionPolicyColumnConstraint,
            this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
        )
    if self._match(TokenType.TAG):
        return self.expression(
            exp.TagColumnConstraint,
            expressions=self._parse_wrapped_csv(self._parse_property),
        )

    return None
440
def _parse_create(self) -> exp.Create | exp.Command:
    """Parse CREATE, unwrapping the name node for non-table creatables."""
    create = super()._parse_create()

    if isinstance(create, exp.Create) and create.kind in self.NON_TABLE_CREATABLES:
        # Replace the Table node with the enclosed Identifier
        create.this.replace(create.this.this)

    return create
448
# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
    """Parse DATE_PART(<part>, <expr>).

    EPOCH* parts are normalized to UNIX-time conversions (scaled for the
    sub-second variants); every other part becomes an Extract node.
    """
    part = self._parse_var() or self._parse_type()
    if not part:
        return None

    self._match(TokenType.COMMA)
    value = self._parse_bitwise()
    part = map_date_part(part)
    name = part.name.upper()

    if name.startswith("EPOCH"):
        # Sub-second epoch parts are epoch seconds scaled by a power of ten;
        # plain EPOCH (and any other EPOCH_* spelling) stays unscaled.
        scale = {
            "EPOCH_MILLISECOND": 10**3,
            "EPOCH_MICROSECOND": 10**6,
            "EPOCH_NANOSECOND": 10**9,
        }.get(name)

        ts = self.expression(exp.Cast, this=value, to=exp.DataType.build("TIMESTAMP"))
        unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            unix = exp.Mul(this=unix, expression=exp.Literal.number(scale))

        return unix

    return self.expression(exp.Extract, this=part, expression=value)
481
def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
    """Parse one key-value entry inside a bracketed/braced construct."""
    if not is_map:
        return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

    # Keys are strings in Snowflake's objects, see also:
    # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
    # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
    return self._parse_slice(self._parse_string())
490
def _parse_lateral(self) -> t.Optional[exp.Lateral]:
    """Parse LATERAL, attaching FLATTEN's canonical output columns.

    LATERAL FLATTEN(...) always produces the columns in FLATTEN_COLUMNS;
    they're added to the table alias unless the user listed their own.
    """
    lateral = super()._parse_lateral()

    if lateral and isinstance(lateral.this, exp.Explode):
        flatten_cols = [exp.to_identifier(name) for name in self.FLATTEN_COLUMNS]
        alias = lateral.args.get("alias")
        if alias:
            if not alias.args.get("columns"):
                alias.set("columns", flatten_cols)
        else:
            exp.alias_(lateral, "_flattened", table=flatten_cols, copy=False)

    return lateral
505
def _parse_at_before(self, table: exp.Table) -> exp.Table:
    """Attach an AT/BEFORE time-travel clause to *table*, if one follows.

    https://docs.snowflake.com/en/sql-reference/constructs/at-before
    """
    index = self._index
    if self._match_texts(("AT", "BEFORE")):
        this = self._prev.text.upper()
        # The clause shape is AT|BEFORE(<kind> => <expr>); kind is one of
        # the recognized historical-data kinds (TIMESTAMP, OFFSET, ...).
        kind = (
            self._match(TokenType.L_PAREN)
            and self._match_texts(self.HISTORICAL_DATA_KIND)
            and self._prev.text.upper()
        )
        expression = self._match(TokenType.FARROW) and self._parse_bitwise()

        if expression:
            self._match_r_paren()
            when = self.expression(
                exp.HistoricalData, this=this, kind=kind, expression=expression
            )
            table.set("when", when)
        else:
            # Not a complete AT/BEFORE clause — rewind and leave the table as-is
            self._retreat(index)

    return table
528
def _parse_table_parts(
    self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
) -> exp.Table:
    """Parse a table reference, including Snowflake staged-file locations.

    https://docs.snowflake.com/en/user-guide/querying-stage
    """
    # A string literal or an @stage path may appear where a table is expected
    if self._match(TokenType.STRING, advance=False):
        table = self._parse_string()
    elif self._match_text_seq("@", advance=False):
        table = self._parse_location_path()
    else:
        table = None

    if table:
        file_format = None
        pattern = None

        # Optional wrapped options: (FILE_FORMAT => ..., PATTERN => ...)
        wrapped = self._match(TokenType.L_PAREN)
        while self._curr and wrapped and not self._match(TokenType.R_PAREN):
            if self._match_text_seq("FILE_FORMAT", "=>"):
                # FILE_FORMAT accepts either a string or a format object name
                file_format = self._parse_string() or super()._parse_table_parts(
                    is_db_reference=is_db_reference
                )
            elif self._match_text_seq("PATTERN", "=>"):
                pattern = self._parse_string()
            else:
                break

            self._match(TokenType.COMMA)

        table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
    else:
        table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

    # Any table reference may be followed by an AT/BEFORE time-travel clause
    return self._parse_at_before(table)
562
def _parse_id_var(
    self,
    any_token: bool = True,
    tokens: t.Optional[t.Collection[TokenType]] = None,
) -> t.Optional[exp.Expression]:
    """Parse an identifier, supporting Snowflake's IDENTIFIER(...) syntax."""
    if not self._match_text_seq("IDENTIFIER", "("):
        return super()._parse_id_var(any_token=any_token, tokens=tokens)

    # IDENTIFIER(...) wraps either a regular identifier or a string literal
    inner = (
        super()._parse_id_var(any_token=any_token, tokens=tokens)
        or self._parse_string()
    )
    self._match_r_paren()
    return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[inner])
577
def _parse_show_snowflake(self, this: str) -> exp.Show:
    """Parse the tail of a Snowflake SHOW statement for object kind *this*."""
    scope = None
    scope_kind = None

    # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
    # which is syntactically valid but has no effect on the output
    terse = self._tokens[self._index - 2].text.upper() == "TERSE"

    history = self._match_text_seq("HISTORY")

    like = self._parse_string() if self._match(TokenType.LIKE) else None

    if self._match(TokenType.IN):
        if self._match_text_seq("ACCOUNT"):
            scope_kind = "ACCOUNT"
        elif self._match_set(self.DB_CREATABLES):
            # Explicit scope kind, e.g. IN DATABASE db / IN SCHEMA s
            scope_kind = self._prev.text.upper()
            if self._curr:
                scope = self._parse_table_parts()
        elif self._curr:
            # No explicit kind: infer SCHEMA vs TABLE from the shown object kind
            scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
            scope = self._parse_table_parts()

    return self.expression(
        exp.Show,
        **{
            "terse": terse,
            "this": this,
            "history": history,
            "like": like,
            "scope": scope,
            "scope_kind": scope_kind,
            "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
            "limit": self._parse_limit(),
            "from": self._parse_string() if self._match(TokenType.FROM) else None,
        },
    )
615
def _parse_alter_table_swap(self) -> exp.SwapTable:
    """Parse ALTER TABLE ... SWAP [WITH] <table>."""
    self._match_text_seq("WITH")
    target = self._parse_table(schema=True)
    return self.expression(exp.SwapTable, this=target)
619
def _parse_location_property(self) -> exp.LocationProperty:
    """Parse LOCATION [=] <stage path> as a table property."""
    self._match(TokenType.EQ)
    path = self._parse_location_path()
    return self.expression(exp.LocationProperty, this=path)
623
def _parse_file_location(self) -> t.Optional[exp.Expression]:
    """Parse a file location: either a parenthesized subquery or a staged file."""
    if self._match(TokenType.L_PAREN, advance=False):
        return self._parse_select(table=True, parse_subquery_alias=False)
    return self._parse_table_parts()
631
def _parse_location_path(self) -> exp.Var:
    """Consume a stage/location path (e.g. @db.schema.stage/dir) into a Var."""
    # We avoid consuming a comma token because external tables like @foo and @bar
    # can be joined in a query with a comma separator, as well as closing paren
    # in case of subqueries
    stop_tokens = (TokenType.COMMA, TokenType.R_PAREN)

    path_tokens = [self._advance_any(ignore_reserved=True)]
    while self._is_connected() and not self._match_set(stop_tokens, advance=False):
        path_tokens.append(self._advance_any(ignore_reserved=True))

    return exp.var("".join(tok.text for tok in path_tokens if tok))
644
def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
    """Parse a lambda argument, honoring Snowflake's optional type annotation."""
    arg = super()._parse_lambda_arg()

    if arg:
        # Snowflake lambdas may type their args, e.g. (x INT) -> ...;
        # the type is preserved by wrapping the argument in a cast
        arg_type = self._parse_types()
        if arg_type:
            return self.expression(exp.Cast, this=arg, to=arg_type)

    return arg

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_JSON_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.REPLACE: 'REPLACE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.CASE: 'CASE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.VAR: 'VAR'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.INT: 'INT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.ASC: 'ASC'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.JSON: 'JSON'>, <TokenType.NULL: 'NULL'>, <TokenType.ROW: 'ROW'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CACHE: 'CACHE'>, <TokenType.USE: 'USE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.INT128: 'INT128'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RANGE: 'RANGE'>, <TokenType.LIST: 'LIST'>, <TokenType.SUPER: 'SUPER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UUID: 'UUID'>, <TokenType.TEXT: 'TEXT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.DATE: 'DATE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ALL: 'ALL'>, <TokenType.DESC: 'DESC'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, 
<TokenType.FUNCTION: 'FUNCTION'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.NAME: 'NAME'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.MAP: 'MAP'>, <TokenType.LOAD: 'LOAD'>, <TokenType.VIEW: 'VIEW'>, <TokenType.BIT: 'BIT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ANY: 'ANY'>, <TokenType.UINT: 'UINT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DIV: 'DIV'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IS: 'IS'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.XML: 'XML'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FALSE: 'FALSE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILTER: 'FILTER'>, <TokenType.FULL: 'FULL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TAG: 'TAG'>, <TokenType.KILL: 'KILL'>, <TokenType.REFERENCES: 
'REFERENCES'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FINAL: 'FINAL'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.INET: 'INET'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TRUE: 'TRUE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SET: 'SET'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ASOF: 'ASOF'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TIME: 'TIME'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VARBINARY: 'VARBINARY'>, 
<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.SOME: 'SOME'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TOP: 'TOP'>, <TokenType.ANTI: 'ANTI'>, <TokenType.SEMI: 'SEMI'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.END: 'END'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INT256: 'INT256'>, <TokenType.UINT128: 'UINT128'>, <TokenType.ISNULL: 'ISNULL'>}
TABLE_ALIAS_TOKENS = {<TokenType.REPLACE: 'REPLACE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.CASE: 'CASE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.VAR: 'VAR'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.INT: 'INT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.ASC: 'ASC'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.JSON: 'JSON'>, <TokenType.NULL: 'NULL'>, <TokenType.ROW: 'ROW'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CACHE: 'CACHE'>, <TokenType.USE: 'USE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.INT128: 'INT128'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.RANGE: 'RANGE'>, <TokenType.LIST: 'LIST'>, <TokenType.SUPER: 'SUPER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UUID: 'UUID'>, <TokenType.TEXT: 'TEXT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.DATE: 'DATE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ALL: 'ALL'>, <TokenType.DESC: 'DESC'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FUNCTION: 
'FUNCTION'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.NAME: 'NAME'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.MAP: 'MAP'>, <TokenType.LOAD: 'LOAD'>, <TokenType.VIEW: 'VIEW'>, <TokenType.BIT: 'BIT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ANY: 'ANY'>, <TokenType.UINT: 'UINT'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.DIV: 'DIV'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.IS: 'IS'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.XML: 'XML'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.COPY: 'COPY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FALSE: 'FALSE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FILTER: 'FILTER'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.ROWS: 'ROWS'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TAG: 'TAG'>, <TokenType.KILL: 'KILL'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FINAL: 'FINAL'>, 
<TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.INET: 'INET'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DATE32: 'DATE32'>, <TokenType.TRUE: 'TRUE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IPV6: 'IPV6'>, <TokenType.SET: 'SET'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DELETE: 'DELETE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.TIME: 'TIME'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.SOME: 'SOME'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UINT256: 'UINT256'>, 
<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TOP: 'TOP'>, <TokenType.ANTI: 'ANTI'>, <TokenType.SEMI: 'SEMI'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.END: 'END'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INT256: 'INT256'>, <TokenType.UINT128: 'UINT128'>, <TokenType.ISNULL: 'ISNULL'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 
'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function 
Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': 
<function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': 
<function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'VIEWS', 'OBJECTS', 'UNIQUE KEYS', 'TABLES', 'SEQUENCES', 'IMPORTED KEYS'}
NON_TABLE_CREATABLES = {'WAREHOUSE', 'STORAGE INTEGRATION', 'TAG', 'STREAMLIT'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
658    class Tokenizer(tokens.Tokenizer):
659        STRING_ESCAPES = ["\\", "'"]
660        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
661        RAW_STRINGS = ["$$"]
662        COMMENTS = ["--", "//", ("/*", "*/")]
663
664        KEYWORDS = {
665            **tokens.Tokenizer.KEYWORDS,
666            "BYTEINT": TokenType.INT,
667            "CHAR VARYING": TokenType.VARCHAR,
668            "CHARACTER VARYING": TokenType.VARCHAR,
669            "EXCLUDE": TokenType.EXCEPT,
670            "ILIKE ANY": TokenType.ILIKE_ANY,
671            "LIKE ANY": TokenType.LIKE_ANY,
672            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
673            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
674            "MINUS": TokenType.EXCEPT,
675            "NCHAR VARYING": TokenType.VARCHAR,
676            "PUT": TokenType.COMMAND,
677            "REMOVE": TokenType.COMMAND,
678            "RM": TokenType.COMMAND,
679            "SAMPLE": TokenType.TABLE_SAMPLE,
680            "SQL_DOUBLE": TokenType.DOUBLE,
681            "SQL_VARCHAR": TokenType.VARCHAR,
682            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
683            "TAG": TokenType.TAG,
684            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
685            "TOP": TokenType.TOP,
686            "WAREHOUSE": TokenType.WAREHOUSE,
687            "STREAMLIT": TokenType.STREAMLIT,
688        }
689        KEYWORDS.pop("/*+")
690
691        SINGLE_TOKENS = {
692            **tokens.Tokenizer.SINGLE_TOKENS,
693            "$": TokenType.PARAMETER,
694        }
695
696        VAR_SINGLE_TOKENS = {"$"}
697
698        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 
'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': 
<TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': 
<TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': 
<TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 700    class Generator(generator.Generator):
 701        PARAMETER_TOKEN = "$"
 702        MATCHED_BY_SOURCE = False
 703        SINGLE_STRING_INTERVAL = True
 704        JOIN_HINTS = False
 705        TABLE_HINTS = False
 706        QUERY_HINTS = False
 707        AGGREGATE_FILTER_SUPPORTED = False
 708        SUPPORTS_TABLE_COPY = False
 709        COLLATE_IS_FUNC = True
 710        LIMIT_ONLY_LITERALS = True
 711        JSON_KEY_VALUE_PAIR_SEP = ","
 712        INSERT_OVERWRITE = " OVERWRITE INTO"
 713        STRUCT_DELIMITER = ("(", ")")
 714        COPY_PARAMS_ARE_WRAPPED = False
 715        COPY_PARAMS_EQ_REQUIRED = True
 716        STAR_EXCEPT = "EXCLUDE"
 717
 718        TRANSFORMS = {
 719            **generator.Generator.TRANSFORMS,
 720            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 721            exp.ArgMax: rename_func("MAX_BY"),
 722            exp.ArgMin: rename_func("MIN_BY"),
 723            exp.Array: inline_array_sql,
 724            exp.ArrayConcat: rename_func("ARRAY_CAT"),
 725            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 726            exp.AtTimeZone: lambda self, e: self.func(
 727                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 728            ),
 729            exp.BitwiseXor: rename_func("BITXOR"),
 730            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 731            exp.DateAdd: date_delta_sql("DATEADD"),
 732            exp.DateDiff: date_delta_sql("DATEDIFF"),
 733            exp.DateStrToDate: datestrtodate_sql,
 734            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 735            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 736            exp.DayOfYear: rename_func("DAYOFYEAR"),
 737            exp.Explode: rename_func("FLATTEN"),
 738            exp.Extract: rename_func("DATE_PART"),
 739            exp.FromTimeZone: lambda self, e: self.func(
 740                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 741            ),
 742            exp.GenerateSeries: lambda self, e: self.func(
 743                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 744            ),
 745            exp.GroupConcat: rename_func("LISTAGG"),
 746            exp.If: if_sql(name="IFF", false_value="NULL"),
 747            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 748            exp.JSONExtractScalar: lambda self, e: self.func(
 749                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 750            ),
 751            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 752            exp.JSONPathRoot: lambda *_: "",
 753            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 754            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 755            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 756            exp.Max: max_or_greatest,
 757            exp.Min: min_or_least,
 758            exp.ParseJSON: lambda self, e: self.func(
 759                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
 760            ),
 761            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 762            exp.PercentileCont: transforms.preprocess(
 763                [transforms.add_within_group_for_percentiles]
 764            ),
 765            exp.PercentileDisc: transforms.preprocess(
 766                [transforms.add_within_group_for_percentiles]
 767            ),
 768            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 769            exp.RegexpILike: _regexpilike_sql,
 770            exp.Rand: rename_func("RANDOM"),
 771            exp.Select: transforms.preprocess(
 772                [
 773                    transforms.eliminate_distinct_on,
 774                    transforms.explode_to_unnest(),
 775                    transforms.eliminate_semi_and_anti_joins,
 776                ]
 777            ),
 778            exp.SHA: rename_func("SHA1"),
 779            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 780            exp.StartsWith: rename_func("STARTSWITH"),
 781            exp.StrPosition: lambda self, e: self.func(
 782                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 783            ),
 784            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 785            exp.Stuff: rename_func("INSERT"),
 786            exp.TimeAdd: date_delta_sql("TIMEADD"),
 787            exp.TimestampDiff: lambda self, e: self.func(
 788                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 789            ),
 790            exp.TimestampTrunc: timestamptrunc_sql(),
 791            exp.TimeStrToTime: timestrtotime_sql,
 792            exp.TimeToStr: lambda self, e: self.func(
 793                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 794            ),
 795            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 796            exp.ToArray: rename_func("TO_ARRAY"),
 797            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 798            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 799            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 800            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 801            exp.TsOrDsToDate: lambda self, e: self.func(
 802                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 803            ),
 804            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 805            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 806            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 807            exp.Xor: rename_func("BOOLXOR"),
 808        }
 809
 810        SUPPORTED_JSON_PATH_PARTS = {
 811            exp.JSONPathKey,
 812            exp.JSONPathRoot,
 813            exp.JSONPathSubscript,
 814        }
 815
 816        TYPE_MAPPING = {
 817            **generator.Generator.TYPE_MAPPING,
 818            exp.DataType.Type.NESTED: "OBJECT",
 819            exp.DataType.Type.STRUCT: "OBJECT",
 820        }
 821
 822        PROPERTIES_LOCATION = {
 823            **generator.Generator.PROPERTIES_LOCATION,
 824            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 825            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 826        }
 827
 828        UNSUPPORTED_VALUES_EXPRESSIONS = {
 829            exp.Map,
 830            exp.StarMap,
 831            exp.Struct,
 832            exp.VarMap,
 833        }
 834
 835        def with_properties(self, properties: exp.Properties) -> str:
 836            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 837
 838        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 839            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 840                values_as_table = False
 841
 842            return super().values_sql(expression, values_as_table=values_as_table)
 843
 844        def datatype_sql(self, expression: exp.DataType) -> str:
 845            expressions = expression.expressions
 846            if (
 847                expressions
 848                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 849                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 850            ):
 851                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 852                return "OBJECT"
 853
 854            return super().datatype_sql(expression)
 855
 856        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 857            return self.func(
 858                "TO_NUMBER",
 859                expression.this,
 860                expression.args.get("format"),
 861                expression.args.get("precision"),
 862                expression.args.get("scale"),
 863            )
 864
 865        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 866            milli = expression.args.get("milli")
 867            if milli is not None:
 868                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 869                expression.set("nano", milli_to_nano)
 870
 871            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 872
 873        def trycast_sql(self, expression: exp.TryCast) -> str:
 874            value = expression.this
 875
 876            if value.type is None:
 877                from sqlglot.optimizer.annotate_types import annotate_types
 878
 879                value = annotate_types(value)
 880
 881            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 882                return super().trycast_sql(expression)
 883
 884            # TRY_CAST only works for string values in Snowflake
 885            return self.cast_sql(expression)
 886
 887        def log_sql(self, expression: exp.Log) -> str:
 888            if not expression.expression:
 889                return self.func("LN", expression.this)
 890
 891            return super().log_sql(expression)
 892
 893        def unnest_sql(self, expression: exp.Unnest) -> str:
 894            unnest_alias = expression.args.get("alias")
 895            offset = expression.args.get("offset")
 896
 897            columns = [
 898                exp.to_identifier("seq"),
 899                exp.to_identifier("key"),
 900                exp.to_identifier("path"),
 901                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 902                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 903                or exp.to_identifier("value"),
 904                exp.to_identifier("this"),
 905            ]
 906
 907            if unnest_alias:
 908                unnest_alias.set("columns", columns)
 909            else:
 910                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 911
 912            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 913            alias = self.sql(unnest_alias)
 914            alias = f" AS {alias}" if alias else ""
 915            return f"{explode}{alias}"
 916
 917        def show_sql(self, expression: exp.Show) -> str:
 918            terse = "TERSE " if expression.args.get("terse") else ""
 919            history = " HISTORY" if expression.args.get("history") else ""
 920            like = self.sql(expression, "like")
 921            like = f" LIKE {like}" if like else ""
 922
 923            scope = self.sql(expression, "scope")
 924            scope = f" {scope}" if scope else ""
 925
 926            scope_kind = self.sql(expression, "scope_kind")
 927            if scope_kind:
 928                scope_kind = f" IN {scope_kind}"
 929
 930            starts_with = self.sql(expression, "starts_with")
 931            if starts_with:
 932                starts_with = f" STARTS WITH {starts_with}"
 933
 934            limit = self.sql(expression, "limit")
 935
 936            from_ = self.sql(expression, "from")
 937            if from_:
 938                from_ = f" FROM {from_}"
 939
 940            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 941
 942        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 943            # Other dialects don't support all of the following parameters, so we need to
 944            # generate default values as necessary to ensure the transpilation is correct
 945            group = expression.args.get("group")
 946            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 947            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 948            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 949
 950            return self.func(
 951                "REGEXP_SUBSTR",
 952                expression.this,
 953                expression.expression,
 954                position,
 955                occurrence,
 956                parameters,
 957                group,
 958            )
 959
 960        def except_op(self, expression: exp.Except) -> str:
 961            if not expression.args.get("distinct"):
 962                self.unsupported("EXCEPT with All is not supported in Snowflake")
 963            return super().except_op(expression)
 964
 965        def intersect_op(self, expression: exp.Intersect) -> str:
 966            if not expression.args.get("distinct"):
 967                self.unsupported("INTERSECT with All is not supported in Snowflake")
 968            return super().intersect_op(expression)
 969
 970        def describe_sql(self, expression: exp.Describe) -> str:
 971            # Default to table if kind is unknown
 972            kind_value = expression.args.get("kind") or "TABLE"
 973            kind = f" {kind_value}" if kind_value else ""
 974            this = f" {self.sql(expression, 'this')}"
 975            expressions = self.expressions(expression, flat=True)
 976            expressions = f" {expressions}" if expressions else ""
 977            return f"DESCRIBE{kind}{this}{expressions}"
 978
 979        def generatedasidentitycolumnconstraint_sql(
 980            self, expression: exp.GeneratedAsIdentityColumnConstraint
 981        ) -> str:
 982            start = expression.args.get("start")
 983            start = f" START {start}" if start else ""
 984            increment = expression.args.get("increment")
 985            increment = f" INCREMENT {increment}" if increment else ""
 986            return f"AUTOINCREMENT{start}{increment}"
 987
 988        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 989            this = self.sql(expression, "this")
 990            return f"SWAP WITH {this}"
 991
 992        def cluster_sql(self, expression: exp.Cluster) -> str:
 993            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 994
 995        def struct_sql(self, expression: exp.Struct) -> str:
 996            keys = []
 997            values = []
 998
 999            for i, e in enumerate(expression.expressions):
1000                if isinstance(e, exp.PropertyEQ):
1001                    keys.append(
1002                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1003                    )
1004                    values.append(e.expression)
1005                else:
1006                    keys.append(exp.Literal.string(f"_{i}"))
1007                    values.append(e)
1008
1009            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1010
1011        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1012            if expression.args.get("weight") or expression.args.get("accuracy"):
1013                self.unsupported(
1014                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1015                )
1016
1017            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1018
1019        def alterset_sql(self, expression: exp.AlterSet) -> str:
1020            exprs = self.expressions(expression, flat=True)
1021            exprs = f" {exprs}" if exprs else ""
1022            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1023            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1024            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1025            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1026            tag = self.expressions(expression, key="tag", flat=True)
1027            tag = f" TAG {tag}" if tag else ""
1028
1029            return f"SET{exprs}{file_format}{copy_options}{tag}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, 
<class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, 
<class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 
'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: 
<Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 
'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.VarMap'>}
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
835        def with_properties(self, properties: exp.Properties) -> str:
836            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
def values_sql( self, expression: sqlglot.expressions.Values, values_as_table: bool = True) -> str:
838        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
839            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
840                values_as_table = False
841
842            return super().values_sql(expression, values_as_table=values_as_table)
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
844        def datatype_sql(self, expression: exp.DataType) -> str:
845            expressions = expression.expressions
846            if (
847                expressions
848                and expression.is_type(*exp.DataType.STRUCT_TYPES)
849                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
850            ):
 851            # The correct syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ]
852                return "OBJECT"
853
854            return super().datatype_sql(expression)
def tonumber_sql(self, expression: sqlglot.expressions.ToNumber) -> str:
856        def tonumber_sql(self, expression: exp.ToNumber) -> str:
857            return self.func(
858                "TO_NUMBER",
859                expression.this,
860                expression.args.get("format"),
861                expression.args.get("precision"),
862                expression.args.get("scale"),
863            )
def timestampfromparts_sql(self, expression: sqlglot.expressions.TimestampFromParts) -> str:
865        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
866            milli = expression.args.get("milli")
867            if milli is not None:
868                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
869                expression.set("nano", milli_to_nano)
870
871            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
873        def trycast_sql(self, expression: exp.TryCast) -> str:
874            value = expression.this
875
876            if value.type is None:
877                from sqlglot.optimizer.annotate_types import annotate_types
878
879                value = annotate_types(value)
880
881            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
882                return super().trycast_sql(expression)
883
884            # TRY_CAST only works for string values in Snowflake
885            return self.cast_sql(expression)
def log_sql(self, expression: sqlglot.expressions.Log) -> str:
887        def log_sql(self, expression: exp.Log) -> str:
888            if not expression.expression:
889                return self.func("LN", expression.this)
890
891            return super().log_sql(expression)
def unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str:
893        def unnest_sql(self, expression: exp.Unnest) -> str:
894            unnest_alias = expression.args.get("alias")
895            offset = expression.args.get("offset")
896
897            columns = [
898                exp.to_identifier("seq"),
899                exp.to_identifier("key"),
900                exp.to_identifier("path"),
901                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
902                seq_get(unnest_alias.columns if unnest_alias else [], 0)
903                or exp.to_identifier("value"),
904                exp.to_identifier("this"),
905            ]
906
907            if unnest_alias:
908                unnest_alias.set("columns", columns)
909            else:
910                unnest_alias = exp.TableAlias(this="_u", columns=columns)
911
912            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
913            alias = self.sql(unnest_alias)
914            alias = f" AS {alias}" if alias else ""
915            return f"{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command together with its optional modifiers."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like_sql = self.sql(expression, "like")
    like = f" LIKE {like_sql}" if like_sql else ""

    scope_sql = self.sql(expression, "scope")
    scope = f" {scope_sql}" if scope_sql else ""

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    # NOTE: the IN <scope_kind> clause precedes the bare scope in the output.
    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render REGEXP_SUBSTR, synthesizing defaults for optional arguments.

    Other dialects don't support all of the following parameters, so default
    values are generated as necessary to ensure the transpilation is correct.
    Each earlier positional argument must be present whenever a later one is,
    so the defaults cascade right-to-left.
    """
    args = expression.args
    group = args.get("group")
    parameters = args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Warn when EXCEPT ALL is requested (Snowflake lacks it), then delegate."""
    if not expression.args.get("distinct"):
        self.unsupported("EXCEPT with All is not supported in Snowflake")

    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Warn when INTERSECT ALL is requested (Snowflake lacks it), then delegate."""
    if not expression.args.get("distinct"):
        self.unsupported("INTERSECT with All is not supported in Snowflake")

    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render a DESCRIBE statement, defaulting the object kind to TABLE.

    When the parsed expression carries no kind, TABLE is used.
    """
    # Default to table if kind is unknown. kind_value is always truthy here
    # (the `or "TABLE"` fallback guarantees it), so the previous
    # `if kind_value else ""` conditional was dead code and has been removed.
    kind_value = expression.args.get("kind") or "TABLE"
    kind = f" {kind_value}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column constraint as Snowflake's AUTOINCREMENT clause."""
    start_value = expression.args.get("start")
    increment_value = expression.args.get("increment")

    start = f" START {start_value}" if start_value else ""
    increment = f" INCREMENT {increment_value}" if increment_value else ""
    return f"AUTOINCREMENT{start}{increment}"
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render ALTER TABLE's SWAP WITH clause."""
    return f"SWAP WITH {self.sql(expression, 'this')}"
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render CLUSTER BY with its expressions wrapped in parentheses."""
    clustering_keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({clustering_keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as Snowflake's OBJECT_CONSTRUCT call.

    Named fields contribute their key as a string literal; positional fields
    receive synthetic keys of the form "_<index>".
    """
    pairs = []

    for index, item in enumerate(expression.expressions):
        if isinstance(item, exp.PropertyEQ):
            if isinstance(item.this, exp.Identifier):
                key = exp.Literal.string(item.name)
            else:
                key = item.this
            pairs.append((key, item.expression))
        else:
            pairs.append((exp.Literal.string(f"_{index}"), item))

    # flatten interleaves keys and values: OBJECT_CONSTRUCT(k1, v1, k2, v2, ...)
    return self.func("OBJECT_CONSTRUCT", *flatten(pairs))
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Render APPROX_PERCENTILE; weight/accuracy arguments are unsupported."""
    has_extra_args = expression.args.get("weight") or expression.args.get("accuracy")
    if has_extra_args:
        self.unsupported(
            "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
        )

    return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET including Snowflake's stage and tag options."""
    parts = []

    exprs = self.expressions(expression, flat=True)
    if exprs:
        parts.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        parts.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        parts.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        parts.append(f" TAG {tag}")

    return "SET" + "".join(parts)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_SET_OP
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql