diff --git a/.check.exs b/.check.exs new file mode 100644 index 0000000..91e14cb --- /dev/null +++ b/.check.exs @@ -0,0 +1,21 @@ +[ + ## all available options with default values (see `mix check` docs for description) + # parallel: true, + # skipped: true, + retry: false, + ## list of tools (see `mix check` docs for defaults) + tools: [ + ## curated tools may be disabled (e.g. the check for compilation warnings) + # {:compiler, false}, + + ## ...or adjusted (e.g. use one-line formatter for more compact credo output) + # {:credo, "mix credo --format oneline"}, + + {:check_formatter, command: "mix spark.formatter --check"}, + {:check_migrations, command: "mix test.check_migrations"} + ## custom new tools may be added (mix tasks or arbitrary commands) + # {:my_mix_task, command: "mix release", env: %{"MIX_ENV" => "prod"}}, + # {:my_arbitrary_tool, command: "npm test", cd: "assets"}, + # {:my_arbitrary_script, command: ["my_script", "argument with spaces"], cd: "scripts"} + ] +] diff --git a/.credo.exs b/.credo.exs new file mode 100644 index 0000000..a986fd0 --- /dev/null +++ b/.credo.exs @@ -0,0 +1,184 @@ +# This file contains the configuration for Credo and you are probably reading +# this after creating it with `mix credo.gen.config`. +# +# If you find anything wrong or unclear in this file, please report an +# issue on GitHub: https://github.com/rrrene/credo/issues +# +%{ + # + # You can have as many configs as you like in the `configs:` field. + configs: [ + %{ + # + # Run any config using `mix credo -C <name>`. If no config name is given + # "default" is used. + # + name: "default", + # + # These are the files included in the analysis: + files: %{ + # + # You can give explicit globs or simply directories. + # In the latter case `**/*.{ex,exs}` will be used. + # + included: [ + "lib/", + "src/", + "test/", + "web/", + "apps/*/lib/", + "apps/*/src/", + "apps/*/test/", + "apps/*/web/" + ], + excluded: [~r"/_build/", ~r"/deps/", ~r"/node_modules/"] + }, + # + # Load and configure plugins here: + # + plugins: [], + # + # If you create your own checks, you must specify the source files for + # them here, so they can be loaded by Credo before running the analysis. + # + requires: [], + # + # If you want to enforce a style guide and need a more traditional linting + # experience, you can change `strict` to `true` below: + # + strict: false, + # + # To modify the timeout for parsing files, change this value: + # + parse_timeout: 5000, + # + # If you want to use uncolored output by default, you can change `color` + # to `false` below: + # + color: true, + # + # You can customize the parameters of any check by adding a second element + # to the tuple. + # + # To disable a check put `false` as second element: + # + # {Credo.Check.Design.DuplicatedCode, false} + # + checks: [ + # + ## Consistency Checks + # + {Credo.Check.Consistency.ExceptionNames, []}, + {Credo.Check.Consistency.LineEndings, []}, + {Credo.Check.Consistency.ParameterPatternMatching, []}, + {Credo.Check.Consistency.SpaceAroundOperators, false}, + {Credo.Check.Consistency.SpaceInParentheses, []}, + {Credo.Check.Consistency.TabsOrSpaces, []}, + + # + ## Design Checks + # + # You can customize the priority of any check + # Priority values are: `low, normal, high, higher` + # + {Credo.Check.Design.AliasUsage, false}, + # You can also customize the exit_status of each check. + # If you don't want TODO comments to cause `mix credo` to fail, just + # set this value to 0 (zero).
+ # + {Credo.Check.Design.TagTODO, false}, + {Credo.Check.Design.TagFIXME, []}, + + # + ## Readability Checks + # + {Credo.Check.Readability.AliasOrder, []}, + {Credo.Check.Readability.FunctionNames, []}, + {Credo.Check.Readability.LargeNumbers, []}, + {Credo.Check.Readability.MaxLineLength, [priority: :low, max_length: 120]}, + {Credo.Check.Readability.ModuleAttributeNames, []}, + {Credo.Check.Readability.ModuleDoc, []}, + {Credo.Check.Readability.ModuleNames, []}, + {Credo.Check.Readability.ParenthesesInCondition, false}, + {Credo.Check.Readability.ParenthesesOnZeroArityDefs, []}, + {Credo.Check.Readability.PredicateFunctionNames, []}, + {Credo.Check.Readability.PreferImplicitTry, []}, + {Credo.Check.Readability.RedundantBlankLines, []}, + {Credo.Check.Readability.Semicolons, []}, + {Credo.Check.Readability.SpaceAfterCommas, []}, + {Credo.Check.Readability.StringSigils, []}, + {Credo.Check.Readability.TrailingBlankLine, []}, + {Credo.Check.Readability.TrailingWhiteSpace, []}, + {Credo.Check.Readability.UnnecessaryAliasExpansion, []}, + {Credo.Check.Readability.VariableNames, []}, + + # + ## Refactoring Opportunities + # + {Credo.Check.Refactor.CondStatements, []}, + {Credo.Check.Refactor.CyclomaticComplexity, false}, + {Credo.Check.Refactor.FunctionArity, []}, + {Credo.Check.Refactor.LongQuoteBlocks, []}, + {Credo.Check.Refactor.MapInto, []}, + {Credo.Check.Refactor.MatchInCondition, []}, + {Credo.Check.Refactor.NegatedConditionsInUnless, []}, + {Credo.Check.Refactor.NegatedConditionsWithElse, []}, + {Credo.Check.Refactor.Nesting, [max_nesting: 5]}, + {Credo.Check.Refactor.UnlessWithElse, []}, + {Credo.Check.Refactor.WithClauses, []}, + + # + ## Warnings + # + {Credo.Check.Warning.BoolOperationOnSameValues, []}, + {Credo.Check.Warning.ExpensiveEmptyEnumCheck, []}, + {Credo.Check.Warning.IExPry, []}, + {Credo.Check.Warning.IoInspect, []}, + {Credo.Check.Warning.LazyLogging, []}, + {Credo.Check.Warning.MixEnv, false}, + {Credo.Check.Warning.OperationOnSameValues, []}, + {Credo.Check.Warning.OperationWithConstantResult, []}, + {Credo.Check.Warning.RaiseInsideRescue, []}, + {Credo.Check.Warning.UnusedEnumOperation, []}, + {Credo.Check.Warning.UnusedFileOperation, []}, + {Credo.Check.Warning.UnusedKeywordOperation, []}, + {Credo.Check.Warning.UnusedListOperation, []}, + {Credo.Check.Warning.UnusedPathOperation, []}, + {Credo.Check.Warning.UnusedRegexOperation, []}, + {Credo.Check.Warning.UnusedStringOperation, []}, + {Credo.Check.Warning.UnusedTupleOperation, []}, + {Credo.Check.Warning.UnsafeExec, []}, + + # + # Checks scheduled for next check update (opt-in for now, just replace `false` with `[]`) + + # + # Controversial and experimental checks (opt-in, just replace `false` with `[]`) + # + {Credo.Check.Readability.StrictModuleLayout, false}, + {Credo.Check.Consistency.MultiAliasImportRequireUse, false}, + {Credo.Check.Consistency.UnusedVariableNames, false}, + {Credo.Check.Design.DuplicatedCode, false}, + {Credo.Check.Readability.AliasAs, false}, + {Credo.Check.Readability.MultiAlias, false}, + {Credo.Check.Readability.Specs, false}, + {Credo.Check.Readability.SinglePipe, false}, + {Credo.Check.Readability.WithCustomTaggedTuple, false}, + {Credo.Check.Refactor.ABCSize, false}, + {Credo.Check.Refactor.AppendSingleItem, false}, + {Credo.Check.Refactor.DoubleBooleanNegation, false}, + {Credo.Check.Refactor.ModuleDependencies, false}, + {Credo.Check.Refactor.NegatedIsNil, false}, + {Credo.Check.Refactor.PipeChainStart, false}, + {Credo.Check.Refactor.VariableRebinding, false}, + 
{Credo.Check.Warning.LeakyEnvironment, false}, + {Credo.Check.Warning.MapGetUnsafePass, false}, + {Credo.Check.Warning.UnsafeToAtom, false} + + # + # Custom checks can be created using `mix credo.gen.check`. + # + ] + } + ] +} diff --git a/.formatter.exs b/.formatter.exs new file mode 100644 index 0000000..18c9078 --- /dev/null +++ b/.formatter.exs @@ -0,0 +1,45 @@ +spark_locals_without_parens = [ + base_filter_sql: 1, + code?: 1, + deferrable: 1, + down: 1, + exclusion_constraint_names: 1, + foreign_key_names: 1, + identity_index_names: 1, + ignore?: 1, + include: 1, + index: 1, + index: 2, + message: 1, + migrate?: 1, + migration_defaults: 1, + migration_ignore_attributes: 1, + migration_types: 1, + name: 1, + on_delete: 1, + on_update: 1, + polymorphic?: 1, + polymorphic_name: 1, + polymorphic_on_delete: 1, + polymorphic_on_update: 1, + reference: 1, + reference: 2, + repo: 1, + skip_unique_indexes: 1, + statement: 1, + statement: 2, + table: 1, + unique: 1, + unique_index_names: 1, + up: 1, + using: 1, + where: 1 +] + +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"], + locals_without_parens: spark_locals_without_parens, + export: [ + locals_without_parens: spark_locals_without_parens + ] +] diff --git a/.git.orig/FETCH_HEAD b/.git.orig/FETCH_HEAD new file mode 100644 index 0000000..9ed37e0 --- /dev/null +++ b/.git.orig/FETCH_HEAD @@ -0,0 +1,2 @@ +58b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 branch 'main' of github.com:ash-project/ash_sqlite +5609cc7941235f99ae9a3ab723fa1c0855c62351 not-for-merge tag 'v0.1.2' of github.com:ash-project/ash_sqlite diff --git a/.git.orig/HEAD b/.git.orig/HEAD new file mode 100644 index 0000000..b870d82 --- /dev/null +++ b/.git.orig/HEAD @@ -0,0 +1 @@ +ref: refs/heads/main diff --git a/.git.orig/ORIG_HEAD b/.git.orig/ORIG_HEAD new file mode 100644 index 0000000..eac8339 --- /dev/null +++ b/.git.orig/ORIG_HEAD @@ -0,0 +1 @@ +4e9cff586684056eb71ae7acad0fdbd7b5db541c diff --git a/.git.orig/config b/.git.orig/config new file mode 100644 index 0000000..9d03d1a --- /dev/null +++ b/.git.orig/config @@ -0,0 +1,12 @@ +[core] + repositoryformatversion = 0 + filemode = true + bare = false + logallrefupdates = true + ignorecase = true +[remote "origin"] + url = git@github.com:ash-project/ash_sqlite.git + fetch = +refs/heads/*:refs/remotes/origin/* +[branch "main"] + remote = origin + merge = refs/heads/main diff --git a/.git.orig/description b/.git.orig/description new file mode 100644 index 0000000..498b267 --- /dev/null +++ b/.git.orig/description @@ -0,0 +1 @@ +Unnamed repository; edit this file 'description' to name the repository. diff --git a/.git.orig/hooks/applypatch-msg.sample b/.git.orig/hooks/applypatch-msg.sample new file mode 100755 index 0000000..a5d7b84 --- /dev/null +++ b/.git.orig/hooks/applypatch-msg.sample @@ -0,0 +1,15 @@ +#!/bin/sh +# +# An example hook script to check the commit log message taken by +# applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. The hook is +# allowed to edit the commit message file. +# +# To enable this hook, rename this file to "applypatch-msg". + +. 
git-sh-setup +commitmsg="$(git rev-parse --git-path hooks/commit-msg)" +test -x "$commitmsg" && exec "$commitmsg" ${1+"$@"} +: diff --git a/.git.orig/hooks/commit-msg.sample b/.git.orig/hooks/commit-msg.sample new file mode 100755 index 0000000..b58d118 --- /dev/null +++ b/.git.orig/hooks/commit-msg.sample @@ -0,0 +1,24 @@ +#!/bin/sh +# +# An example hook script to check the commit log message. +# Called by "git commit" with one argument, the name of the file +# that has the commit message. The hook should exit with non-zero +# status after issuing an appropriate message if it wants to stop the +# commit. The hook is allowed to edit the commit message file. +# +# To enable this hook, rename this file to "commit-msg". + +# Uncomment the below to add a Signed-off-by line to the message. +# Doing this in a hook is a bad idea in general, but the prepare-commit-msg +# hook is more suited to it. +# +# SOB=$(git var GIT_AUTHOR_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# grep -qs "^$SOB" "$1" || echo "$SOB" >> "$1" + +# This example catches duplicate Signed-off-by lines. + +test "" = "$(grep '^Signed-off-by: ' "$1" | + sort | uniq -c | sed -e '/^[ ]*1[ ]/d')" || { + echo >&2 Duplicate Signed-off-by lines. + exit 1 +} diff --git a/.git.orig/hooks/fsmonitor-watchman.sample b/.git.orig/hooks/fsmonitor-watchman.sample new file mode 100755 index 0000000..23e856f --- /dev/null +++ b/.git.orig/hooks/fsmonitor-watchman.sample @@ -0,0 +1,174 @@ +#!/usr/bin/perl + +use strict; +use warnings; +use IPC::Open2; + +# An example hook script to integrate Watchman +# (https://facebook.github.io/watchman/) with git to speed up detecting +# new and modified files. +# +# The hook is passed a version (currently 2) and last update token +# formatted as a string and outputs to stdout a new update token and +# all files that have been modified since the update token. Paths must +# be relative to the root of the working tree and separated by a single NUL. +# +# To enable this hook, rename this file to "query-watchman" and set +# 'git config core.fsmonitor .git/hooks/query-watchman' +# +my ($version, $last_update_token) = @ARGV; + +# Uncomment for debugging +# print STDERR "$0 $version $last_update_token\n"; + +# Check the hook interface version +if ($version ne 2) { + die "Unsupported query-fsmonitor hook version '$version'.\n" . + "Falling back to scanning...\n"; +} + +my $git_work_tree = get_working_dir(); + +my $retry = 1; + +my $json_pkg; +eval { + require JSON::XS; + $json_pkg = "JSON::XS"; + 1; +} or do { + require JSON::PP; + $json_pkg = "JSON::PP"; +}; + +launch_watchman(); + +sub launch_watchman { + my $o = watchman_query(); + if (is_work_tree_watched($o)) { + output_result($o->{clock}, @{$o->{files}}); + } +} + +sub output_result { + my ($clockid, @files) = @_; + + # Uncomment for debugging watchman output + # open (my $fh, ">", ".git/watchman-output.out"); + # binmode $fh, ":utf8"; + # print $fh "$clockid\n@files\n"; + # close $fh; + + binmode STDOUT, ":utf8"; + print $clockid; + print "\0"; + local $, = "\0"; + print @files; +} + +sub watchman_clock { + my $response = qx/watchman clock "$git_work_tree"/; + die "Failed to get clock id on '$git_work_tree'.\n" . + "Falling back to scanning...\n" if $? != 0; + + return $json_pkg->new->utf8->decode($response); +} + +sub watchman_query { + my $pid = open2(\*CHLD_OUT, \*CHLD_IN, 'watchman -j --no-pretty') + or die "open2() failed: $!\n" . 
+ "Falling back to scanning...\n"; + + # In the query expression below we're asking for names of files that + # changed since $last_update_token but not from the .git folder. + # + # To accomplish this, we're using the "since" generator to use the + # recency index to select candidate nodes and "fields" to limit the + # output to file names only. Then we're using the "expression" term to + # further constrain the results. + my $last_update_line = ""; + if (substr($last_update_token, 0, 1) eq "c") { + $last_update_token = "\"$last_update_token\""; + $last_update_line = qq[\n"since": $last_update_token,]; + } + my $query = <<" END"; + ["query", "$git_work_tree", {$last_update_line + "fields": ["name"], + "expression": ["not", ["dirname", ".git"]] + }] + END + + # Uncomment for debugging the watchman query + # open (my $fh, ">", ".git/watchman-query.json"); + # print $fh $query; + # close $fh; + + print CHLD_IN $query; + close CHLD_IN; + my $response = do {local $/; <CHLD_OUT>}; + + # Uncomment for debugging the watch response + # open ($fh, ">", ".git/watchman-response.json"); + # print $fh $response; + # close $fh; + + die "Watchman: command returned no output.\n" . + "Falling back to scanning...\n" if $response eq ""; + die "Watchman: command returned invalid output: $response\n" . + "Falling back to scanning...\n" unless $response =~ /^\{/; + + return $json_pkg->new->utf8->decode($response); +} + +sub is_work_tree_watched { + my ($output) = @_; + my $error = $output->{error}; + if ($retry > 0 and $error and $error =~ m/unable to resolve root .* directory (.*) is not watched/) { + $retry--; + my $response = qx/watchman watch "$git_work_tree"/; + die "Failed to make watchman watch '$git_work_tree'.\n" . + "Falling back to scanning...\n" if $? != 0; + $output = $json_pkg->new->utf8->decode($response); + $error = $output->{error}; + die "Watchman: $error.\n" . + "Falling back to scanning...\n" if $error; + + # Uncomment for debugging watchman output + # open (my $fh, ">", ".git/watchman-output.out"); + # close $fh; + + # Watchman will always return all files on the first query so + # return the fast "everything is dirty" flag to git and do the + # Watchman query just to get it over with now so we won't pay + # the cost in git to look up each individual file. + my $o = watchman_clock(); + $error = $output->{error}; + + die "Watchman: $error.\n" . + "Falling back to scanning...\n" if $error; + + output_result($o->{clock}, ("/")); + $last_update_token = $o->{clock}; + + eval { launch_watchman() }; + return 0; + } + + die "Watchman: $error.\n" . + "Falling back to scanning...\n" if $error; + + return 1; +} + +sub get_working_dir { + my $working_dir; + if ($^O =~ 'msys' || $^O =~ 'cygwin') { + $working_dir = Win32::GetCwd(); + $working_dir =~ tr/\\/\//; + } else { + require Cwd; + $working_dir = Cwd::cwd(); + } + + return $working_dir; +} diff --git a/.git.orig/hooks/post-update.sample b/.git.orig/hooks/post-update.sample new file mode 100755 index 0000000..ec17ec1 --- /dev/null +++ b/.git.orig/hooks/post-update.sample @@ -0,0 +1,8 @@ +#!/bin/sh +# +# An example hook script to prepare a packed repository for use over +# dumb transports. +# +# To enable this hook, rename this file to "post-update".
+ +exec git update-server-info diff --git a/.git.orig/hooks/pre-applypatch.sample b/.git.orig/hooks/pre-applypatch.sample new file mode 100755 index 0000000..4142082 --- /dev/null +++ b/.git.orig/hooks/pre-applypatch.sample @@ -0,0 +1,14 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed +# by applypatch from an e-mail message. +# +# The hook should exit with non-zero status after issuing an +# appropriate message if it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-applypatch". + +. git-sh-setup +precommit="$(git rev-parse --git-path hooks/pre-commit)" +test -x "$precommit" && exec "$precommit" ${1+"$@"} +: diff --git a/.git.orig/hooks/pre-commit.sample b/.git.orig/hooks/pre-commit.sample new file mode 100755 index 0000000..29ed5ee --- /dev/null +++ b/.git.orig/hooks/pre-commit.sample @@ -0,0 +1,49 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git commit" with no arguments. The hook should +# exit with non-zero status after issuing an appropriate message if +# it wants to stop the commit. +# +# To enable this hook, rename this file to "pre-commit". + +if git rev-parse --verify HEAD >/dev/null 2>&1 +then + against=HEAD +else + # Initial commit: diff against an empty tree object + against=$(git hash-object -t tree /dev/null) +fi + +# If you want to allow non-ASCII filenames set this variable to true. +allownonascii=$(git config --type=bool hooks.allownonascii) + +# Redirect output to stderr. +exec 1>&2 + +# Cross platform projects tend to avoid non-ASCII filenames; prevent +# them from being added to the repository. We exploit the fact that the +# printable range starts at the space character and ends with tilde. +if [ "$allownonascii" != "true" ] && + # Note that the use of brackets around a tr range is ok here, (it's + # even required, for portability to Solaris 10's /usr/bin/tr), since + # the square bracket bytes happen to fall in the designated range. + test $(git diff-index --cached --name-only --diff-filter=A -z $against | + LC_ALL=C tr -d '[ -~]\0' | wc -c) != 0 +then + cat <<\EOF +Error: Attempt to add a non-ASCII file name. + +This can cause problems if you want to work with people on other platforms. + +To be portable it is advisable to rename the file. + +If you know what you are doing you can disable this check using: + + git config hooks.allownonascii true +EOF + exit 1 +fi + +# If there are whitespace errors, print the offending file names and fail. +exec git diff-index --check --cached $against -- diff --git a/.git.orig/hooks/pre-merge-commit.sample b/.git.orig/hooks/pre-merge-commit.sample new file mode 100755 index 0000000..399eab1 --- /dev/null +++ b/.git.orig/hooks/pre-merge-commit.sample @@ -0,0 +1,13 @@ +#!/bin/sh +# +# An example hook script to verify what is about to be committed. +# Called by "git merge" with no arguments. The hook should +# exit with non-zero status after issuing an appropriate message to +# stderr if it wants to stop the merge commit. +# +# To enable this hook, rename this file to "pre-merge-commit". + +. git-sh-setup +test -x "$GIT_DIR/hooks/pre-commit" && + exec "$GIT_DIR/hooks/pre-commit" +: diff --git a/.git.orig/hooks/pre-push.sample b/.git.orig/hooks/pre-push.sample new file mode 100755 index 0000000..4ce688d --- /dev/null +++ b/.git.orig/hooks/pre-push.sample @@ -0,0 +1,53 @@ +#!/bin/sh + +# An example hook script to verify what is about to be pushed. 
Called by "git +# push" after it has checked the remote status, but before anything has been +# pushed. If this script exits with a non-zero status nothing will be pushed. +# +# This hook is called with the following parameters: +# +# $1 -- Name of the remote to which the push is being done +# $2 -- URL to which the push is being done +# +# If pushing without using a named remote those arguments will be equal. +# +# Information about the commits which are being pushed is supplied as lines to +# the standard input in the form: +# +# +# +# This sample shows how to prevent push of commits where the log message starts +# with "WIP" (work in progress). + +remote="$1" +url="$2" + +zero=$(git hash-object --stdin &2 "Found WIP commit in $local_ref, not pushing" + exit 1 + fi + fi +done + +exit 0 diff --git a/.git.orig/hooks/pre-rebase.sample b/.git.orig/hooks/pre-rebase.sample new file mode 100755 index 0000000..6cbef5c --- /dev/null +++ b/.git.orig/hooks/pre-rebase.sample @@ -0,0 +1,169 @@ +#!/bin/sh +# +# Copyright (c) 2006, 2008 Junio C Hamano +# +# The "pre-rebase" hook is run just before "git rebase" starts doing +# its job, and can prevent the command from running by exiting with +# non-zero status. +# +# The hook is called with the following parameters: +# +# $1 -- the upstream the series was forked from. +# $2 -- the branch being rebased (or empty when rebasing the current branch). +# +# This sample shows how to prevent topic branches that are already +# merged to 'next' branch from getting rebased, because allowing it +# would result in rebasing already published history. + +publish=next +basebranch="$1" +if test "$#" = 2 +then + topic="refs/heads/$2" +else + topic=`git symbolic-ref HEAD` || + exit 0 ;# we do not interrupt rebasing detached HEAD +fi + +case "$topic" in +refs/heads/??/*) + ;; +*) + exit 0 ;# we do not interrupt others. + ;; +esac + +# Now we are dealing with a topic branch being rebased +# on top of master. Is it OK to rebase it? + +# Does the topic really exist? +git show-ref -q "$topic" || { + echo >&2 "No such branch $topic" + exit 1 +} + +# Is topic fully merged to master? +not_in_master=`git rev-list --pretty=oneline ^master "$topic"` +if test -z "$not_in_master" +then + echo >&2 "$topic is fully merged to master; better remove it." + exit 1 ;# we could allow it, but there is no point. +fi + +# Is topic ever merged to next? If so you should not be rebasing it. +only_next_1=`git rev-list ^master "^$topic" ${publish} | sort` +only_next_2=`git rev-list ^master ${publish} | sort` +if test "$only_next_1" = "$only_next_2" +then + not_in_topic=`git rev-list "^$topic" master` + if test -z "$not_in_topic" + then + echo >&2 "$topic is already up to date with master" + exit 1 ;# we could allow it, but there is no point. + else + exit 0 + fi +else + not_in_next=`git rev-list --pretty=oneline ^${publish} "$topic"` + /usr/bin/perl -e ' + my $topic = $ARGV[0]; + my $msg = "* $topic has commits already merged to public branch:\n"; + my (%not_in_next) = map { + /^([0-9a-f]+) /; + ($1 => 1); + } split(/\n/, $ARGV[1]); + for my $elem (map { + /^([0-9a-f]+) (.*)$/; + [$1 => $2]; + } split(/\n/, $ARGV[2])) { + if (!exists $not_in_next{$elem->[0]}) { + if ($msg) { + print STDERR $msg; + undef $msg; + } + print STDERR " $elem->[1]\n"; + } + } + ' "$topic" "$not_in_next" "$not_in_master" + exit 1 +fi + +<<\DOC_END + +This sample hook safeguards topic branches that have been +published from being rewound. 
+ +The workflow assumed here is: + + * Once a topic branch forks from "master", "master" is never + merged into it again (either directly or indirectly). + + * Once a topic branch is fully cooked and merged into "master", + it is deleted. If you need to build on top of it to correct + earlier mistakes, a new topic branch is created by forking at + the tip of the "master". This is not strictly necessary, but + it makes it easier to keep your history simple. + + * Whenever you need to test or publish your changes to topic + branches, merge them into "next" branch. + +The script, being an example, hardcodes the publish branch name +to be "next", but it is trivial to make it configurable via +$GIT_DIR/config mechanism. + +With this workflow, you would want to know: + +(1) ... if a topic branch has ever been merged to "next". Young + topic branches can have stupid mistakes you would rather + clean up before publishing, and things that have not been + merged into other branches can be easily rebased without + affecting other people. But once it is published, you would + not want to rewind it. + +(2) ... if a topic branch has been fully merged to "master". + Then you can delete it. More importantly, you should not + build on top of it -- other people may already want to + change things related to the topic as patches against your + "master", so if you need further changes, it is better to + fork the topic (perhaps with the same name) afresh from the + tip of "master". + +Let's look at this example: + + o---o---o---o---o---o---o---o---o---o "next" + / / / / + / a---a---b A / / + / / / / + / / c---c---c---c B / + / / / \ / + / / / b---b C \ / + / / / / \ / + ---o---o---o---o---o---o---o---o---o---o---o "master" + + +A, B and C are topic branches. + + * A has one fix since it was merged up to "next". + + * B has finished. It has been fully merged up to "master" and "next", + and is ready to be deleted. + + * C has not merged to "next" at all. + +We would want to allow C to be rebased, refuse A, and encourage +B to be deleted. + +To compute (1): + + git rev-list ^master ^topic next + git rev-list ^master next + + if these match, topic has not merged in next at all. + +To compute (2): + + git rev-list master..topic + + if this is empty, it is fully merged to "master". + +DOC_END diff --git a/.git.orig/hooks/pre-receive.sample b/.git.orig/hooks/pre-receive.sample new file mode 100755 index 0000000..a1fd29e --- /dev/null +++ b/.git.orig/hooks/pre-receive.sample @@ -0,0 +1,24 @@ +#!/bin/sh +# +# An example hook script to make use of push options. +# The example simply echoes all push options that start with 'echoback=' +# and rejects all pushes when the "reject" push option is used. +# +# To enable this hook, rename this file to "pre-receive". + +if test -n "$GIT_PUSH_OPTION_COUNT" +then + i=0 + while test "$i" -lt "$GIT_PUSH_OPTION_COUNT" + do + eval "value=\$GIT_PUSH_OPTION_$i" + case "$value" in + echoback=*) + echo "echo from the pre-receive-hook: ${value#*=}" >&2 + ;; + reject) + exit 1 + esac + i=$((i + 1)) + done +fi diff --git a/.git.orig/hooks/prepare-commit-msg.sample b/.git.orig/hooks/prepare-commit-msg.sample new file mode 100755 index 0000000..10fa14c --- /dev/null +++ b/.git.orig/hooks/prepare-commit-msg.sample @@ -0,0 +1,42 @@ +#!/bin/sh +# +# An example hook script to prepare the commit log message. +# Called by "git commit" with the name of the file that has the +# commit message, followed by the description of the commit +# message's source. 
The hook's purpose is to edit the commit +# message file. If the hook fails with a non-zero status, +# the commit is aborted. +# +# To enable this hook, rename this file to "prepare-commit-msg". + +# This hook includes three examples. The first one removes the +# "# Please enter the commit message..." help message. +# +# The second includes the output of "git diff --name-status -r" +# into the message, just before the "git status" output. It is +# commented because it doesn't cope with --amend or with squashed +# commits. +# +# The third example adds a Signed-off-by line to the message, that can +# still be edited. This is rarely a good idea. + +COMMIT_MSG_FILE=$1 +COMMIT_SOURCE=$2 +SHA1=$3 + +/usr/bin/perl -i.bak -ne 'print unless(m/^. Please enter the commit message/..m/^#$/)' "$COMMIT_MSG_FILE" + +# case "$COMMIT_SOURCE,$SHA1" in +# ,|template,) +# /usr/bin/perl -i.bak -pe ' +# print "\n" . `git diff --cached --name-status -r` +# if /^#/ && $first++ == 0' "$COMMIT_MSG_FILE" ;; +# *) ;; +# esac + +# SOB=$(git var GIT_COMMITTER_IDENT | sed -n 's/^\(.*>\).*$/Signed-off-by: \1/p') +# git interpret-trailers --in-place --trailer "$SOB" "$COMMIT_MSG_FILE" +# if test -z "$COMMIT_SOURCE" +# then +# /usr/bin/perl -i.bak -pe 'print "\n" if !$first_line++' "$COMMIT_MSG_FILE" +# fi diff --git a/.git.orig/hooks/push-to-checkout.sample b/.git.orig/hooks/push-to-checkout.sample new file mode 100755 index 0000000..af5a0c0 --- /dev/null +++ b/.git.orig/hooks/push-to-checkout.sample @@ -0,0 +1,78 @@ +#!/bin/sh + +# An example hook script to update a checked-out tree on a git push. +# +# This hook is invoked by git-receive-pack(1) when it reacts to git +# push and updates reference(s) in its repository, and when the push +# tries to update the branch that is currently checked out and the +# receive.denyCurrentBranch configuration variable is set to +# updateInstead. +# +# By default, such a push is refused if the working tree and the index +# of the remote repository has any difference from the currently +# checked out commit; when both the working tree and the index match +# the current commit, they are updated to match the newly pushed tip +# of the branch. This hook is to be used to override the default +# behaviour; however the code below reimplements the default behaviour +# as a starting point for convenient modification. +# +# The hook receives the commit with which the tip of the current +# branch is going to be updated: +commit=$1 + +# It can exit with a non-zero status to refuse the push (when it does +# so, it must not modify the index or the working tree). +die () { + echo >&2 "$*" + exit 1 +} + +# Or it can make any necessary changes to the working tree and to the +# index to bring them to the desired state when the tip of the current +# branch is updated to the new commit, and exit with a zero status. +# +# For example, the hook can simply run git read-tree -u -m HEAD "$1" +# in order to emulate git fetch that is run in the reverse direction +# with git push, as the two-tree form of git read-tree -u -m is +# essentially the same as git switch or git checkout that switches +# branches while keeping the local changes in the working tree that do +# not interfere with the difference between the branches. + +# The below is a more-or-less exact translation to shell of the C code +# for the default behaviour for git's push-to-checkout hook defined in +# the push_to_deploy() function in builtin/receive-pack.c. 
+# +# Note that the hook will be executed from the repository directory, +# not from the working tree, so if you want to perform operations on +# the working tree, you will have to adapt your code accordingly, e.g. +# by adding "cd .." or using relative paths. + +if ! git update-index -q --ignore-submodules --refresh +then + die "Up-to-date check failed" +fi + +if ! git diff-files --quiet --ignore-submodules -- +then + die "Working directory has unstaged changes" +fi + +# This is a rough translation of: +# +# head_has_history() ? "HEAD" : EMPTY_TREE_SHA1_HEX +if git cat-file -e HEAD 2>/dev/null +then + head=HEAD +else + head=$(git hash-object -t tree --stdin </dev/null) +fi + +if ! git read-tree -u -m "$head" "$commit" +then + die "Could not update working tree to new HEAD" +fi diff --git a/.git.orig/hooks/sendemail-validate.sample b/.git.orig/hooks/sendemail-validate.sample new file mode 100755 --- /dev/null +++ b/.git.orig/hooks/sendemail-validate.sample +#!/bin/sh + +# An example hook script to validate a patch (and/or patch series) before +# sending it via email. +# +# The hook should exit with non-zero status after issuing an appropriate +# message if it wants to prevent the email(s) from being sent. +# +# To enable this hook, rename this file to "sendemail-validate". +# +# By default, it will only check that the patch(es) can be applied on top of +# the default upstream branch without conflicts in a secondary worktree. After +# validation (successful or not) of the last patch of a series, the worktree +# will be deleted. +# +# The following config variables can be set to change the default remote and +# remote ref that are used to apply the patches against: +# +# sendemail.validateRemote (default: origin) +# sendemail.validateRemoteRef (default: HEAD) +# +# Replace the TODO placeholders with appropriate checks according to your +# needs. + +validate_cover_letter () { + file="$1" + # TODO: Replace with appropriate checks (e.g. spell checking). + true +} + +validate_patch () { + file="$1" + # Ensure that the patch applies cleanly on top of the upstream branch. + git am -3 "$file" || return + # TODO: Replace with appropriate checks for this patch + # (e.g. checkpatch.pl). + true +} + +validate_series () { + # TODO: Replace with appropriate checks for the whole series + # (e.g. quick build, coding style checks, etc.). + true +} + +# main ------------------------------------------------------------------- + +if test "$GIT_SENDEMAIL_FILE_COUNTER" = 1 +then + remote=$(git config --default origin --get sendemail.validateRemote) && + ref=$(git config --default HEAD --get sendemail.validateRemoteRef) && + worktree=$(mktemp --tmpdir -d sendemail-validate.XXXXXX) && + git worktree add -fd --checkout "$worktree" "refs/remotes/$remote/$ref" && + git config --replace-all sendemail.validateWorktree "$worktree" +else + worktree=$(git config --get sendemail.validateWorktree) +fi || { + echo "sendemail-validate: error: failed to prepare worktree" >&2 + exit 1 +} + +unset GIT_DIR GIT_WORK_TREE +cd "$worktree" && + +if grep -q "^diff --git " "$1" +then + validate_patch "$1" +else + validate_cover_letter "$1" +fi && + +if test "$GIT_SENDEMAIL_FILE_COUNTER" = "$GIT_SENDEMAIL_FILE_TOTAL" +then + git config --unset-all sendemail.validateWorktree && + trap 'git worktree remove -ff "$worktree"' EXIT && + validate_series +fi diff --git a/.git.orig/hooks/update.sample b/.git.orig/hooks/update.sample new file mode 100755 index 0000000..c4d426b --- /dev/null +++ b/.git.orig/hooks/update.sample @@ -0,0 +1,128 @@ +#!/bin/sh +# +# An example hook script to block unannotated tags from entering. +# Called by "git receive-pack" with arguments: refname sha1-old sha1-new +# +# To enable this hook, rename this file to "update". +# +# Config +# ------ +# hooks.allowunannotated +# This boolean sets whether unannotated tags will be allowed into the +# repository. By default they won't be. +# hooks.allowdeletetag +# This boolean sets whether deleting tags will be allowed in the +# repository. By default they won't be. +# hooks.allowmodifytag +# This boolean sets whether a tag may be modified after creation. By default +# it won't be. +# hooks.allowdeletebranch +# This boolean sets whether deleting branches will be allowed in the +# repository. By default they won't be. +# hooks.denycreatebranch +# This boolean sets whether remotely creating branches will be denied +# in the repository. By default this is allowed. +# + +# --- Command line +refname="$1" +oldrev="$2" +newrev="$3" + +# --- Safety check +if [ -z "$GIT_DIR" ]; then + echo "Don't run this script from the command line." >&2 + echo " (if you want, you could supply GIT_DIR then run" >&2 + echo " $0 <ref> <oldrev> <newrev>)" >&2 + exit 1 +fi + +if [ -z "$refname" -o -z "$oldrev" -o -z "$newrev" ]; then + echo "usage: $0 <ref> <oldrev> <newrev>" >&2 + exit 1 +fi + +# --- Config +allowunannotated=$(git config --type=bool hooks.allowunannotated) +allowdeletebranch=$(git config --type=bool hooks.allowdeletebranch) +denycreatebranch=$(git config --type=bool hooks.denycreatebranch) +allowdeletetag=$(git config --type=bool hooks.allowdeletetag) +allowmodifytag=$(git config --type=bool hooks.allowmodifytag) + +# check for no description +projectdesc=$(sed -e '1q' "$GIT_DIR/description") +case "$projectdesc" in +"Unnamed repository"* | "") + echo "*** Project description file hasn't been set" >&2 + exit 1 + ;; +esac + +# --- Check types +# if $newrev is 0000...0000, it's a commit to delete a ref. +zero=$(git hash-object --stdin </dev/null | tr '[0-9a-f]' '0') +if [ "$newrev" = "$zero" ]; then + newrev_type=delete +else + newrev_type=$(git cat-file -t $newrev) +fi + +case "$refname","$newrev_type" in + refs/tags/*,commit) + # un-annotated tag + short_refname=${refname##refs/tags/} + if [ "$allowunannotated" != "true" ]; then + echo "*** The un-annotated tag, $short_refname, is not allowed in this repository" >&2 + echo "*** Use 'git tag [ -a | -s ]' for tags you want to propagate."
>&2 + exit 1 + fi + ;; + refs/tags/*,delete) + # delete tag + if [ "$allowdeletetag" != "true" ]; then + echo "*** Deleting a tag is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/tags/*,tag) + # annotated tag + if [ "$allowmodifytag" != "true" ] && git rev-parse $refname > /dev/null 2>&1 + then + echo "*** Tag '$refname' already exists." >&2 + echo "*** Modifying a tag is not allowed in this repository." >&2 + exit 1 + fi + ;; + refs/heads/*,commit) + # branch + if [ "$oldrev" = "$zero" -a "$denycreatebranch" = "true" ]; then + echo "*** Creating a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/heads/*,delete) + # delete branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + refs/remotes/*,commit) + # tracking branch + ;; + refs/remotes/*,delete) + # delete tracking branch + if [ "$allowdeletebranch" != "true" ]; then + echo "*** Deleting a tracking branch is not allowed in this repository" >&2 + exit 1 + fi + ;; + *) + # Anything else (is there anything else?) + echo "*** Update hook: unknown type of update to ref $refname of type $newrev_type" >&2 + exit 1 + ;; +esac + +# --- Finished +exit 0 diff --git a/.git.orig/index b/.git.orig/index new file mode 100644 index 0000000..2e8cd4d Binary files /dev/null and b/.git.orig/index differ diff --git a/.git.orig/info/exclude b/.git.orig/info/exclude new file mode 100644 index 0000000..a5196d1 --- /dev/null +++ b/.git.orig/info/exclude @@ -0,0 +1,6 @@ +# git ls-files --others --exclude-from=.git/info/exclude +# Lines that start with '#' are comments. +# For a project mostly in C, the following would be a good set of +# exclude patterns (uncomment them if you want to use them): +# *.[oa] +# *~ diff --git a/.git.orig/logs/HEAD b/.git.orig/logs/HEAD new file mode 100644 index 0000000..8d141e4 --- /dev/null +++ b/.git.orig/logs/HEAD @@ -0,0 +1,2 @@ +0000000000000000000000000000000000000000 4e9cff586684056eb71ae7acad0fdbd7b5db541c Joel Kociolek 1715158016 +0300 clone: from github.com:ash-project/ash_sqlite.git +4e9cff586684056eb71ae7acad0fdbd7b5db541c 58b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 Joel Kociolek 1715508288 +0300 pull --autostash: Fast-forward diff --git a/.git.orig/logs/refs/heads/main b/.git.orig/logs/refs/heads/main new file mode 100644 index 0000000..8d141e4 --- /dev/null +++ b/.git.orig/logs/refs/heads/main @@ -0,0 +1,2 @@ +0000000000000000000000000000000000000000 4e9cff586684056eb71ae7acad0fdbd7b5db541c Joel Kociolek 1715158016 +0300 clone: from github.com:ash-project/ash_sqlite.git +4e9cff586684056eb71ae7acad0fdbd7b5db541c 58b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 Joel Kociolek 1715508288 +0300 pull --autostash: Fast-forward diff --git a/.git.orig/logs/refs/remotes/origin/HEAD b/.git.orig/logs/refs/remotes/origin/HEAD new file mode 100644 index 0000000..467dda9 --- /dev/null +++ b/.git.orig/logs/refs/remotes/origin/HEAD @@ -0,0 +1 @@ +0000000000000000000000000000000000000000 4e9cff586684056eb71ae7acad0fdbd7b5db541c Joel Kociolek 1715158016 +0300 clone: from github.com:ash-project/ash_sqlite.git diff --git a/.git.orig/logs/refs/remotes/origin/main b/.git.orig/logs/refs/remotes/origin/main new file mode 100644 index 0000000..6baeafe --- /dev/null +++ b/.git.orig/logs/refs/remotes/origin/main @@ -0,0 +1 @@ +4e9cff586684056eb71ae7acad0fdbd7b5db541c 58b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 Joel Kociolek 1715508288 +0300 pull --autostash: fast-forward diff --git 
a/.git.orig/objects/04/67fd09bbd2872c1e661f3cf0c273ef9aee72ae b/.git.orig/objects/04/67fd09bbd2872c1e661f3cf0c273ef9aee72ae new file mode 100644 index 0000000..f856ed2 Binary files /dev/null and b/.git.orig/objects/04/67fd09bbd2872c1e661f3cf0c273ef9aee72ae differ diff --git a/.git.orig/objects/0f/4042338f7a55716550278872a2ce096b089162 b/.git.orig/objects/0f/4042338f7a55716550278872a2ce096b089162 new file mode 100644 index 0000000..aa4fa63 Binary files /dev/null and b/.git.orig/objects/0f/4042338f7a55716550278872a2ce096b089162 differ diff --git a/.git.orig/objects/20/a3c392517ad8f3e00a75d350bc8e9f55afd23c b/.git.orig/objects/20/a3c392517ad8f3e00a75d350bc8e9f55afd23c new file mode 100644 index 0000000..cb44b54 Binary files /dev/null and b/.git.orig/objects/20/a3c392517ad8f3e00a75d350bc8e9f55afd23c differ diff --git a/.git.orig/objects/23/b17f5fb882b1845b1dd3bd276509d086f658e9 b/.git.orig/objects/23/b17f5fb882b1845b1dd3bd276509d086f658e9 new file mode 100644 index 0000000..7c873b6 Binary files /dev/null and b/.git.orig/objects/23/b17f5fb882b1845b1dd3bd276509d086f658e9 differ diff --git a/.git.orig/objects/2b/ed9af095c876ef9131468f6305f761e51c7869 b/.git.orig/objects/2b/ed9af095c876ef9131468f6305f761e51c7869 new file mode 100644 index 0000000..98c630c Binary files /dev/null and b/.git.orig/objects/2b/ed9af095c876ef9131468f6305f761e51c7869 differ diff --git a/.git.orig/objects/3c/75dddf64c953dead4c6e27a9b8d299b013c471 b/.git.orig/objects/3c/75dddf64c953dead4c6e27a9b8d299b013c471 new file mode 100644 index 0000000..153878b Binary files /dev/null and b/.git.orig/objects/3c/75dddf64c953dead4c6e27a9b8d299b013c471 differ diff --git a/.git.orig/objects/40/8676a416aa889991156047b423a4def5f4625c b/.git.orig/objects/40/8676a416aa889991156047b423a4def5f4625c new file mode 100644 index 0000000..2efba32 Binary files /dev/null and b/.git.orig/objects/40/8676a416aa889991156047b423a4def5f4625c differ diff --git a/.git.orig/objects/4c/a02b3951a7edd86698d6becdbaf38a2e0f2e30 b/.git.orig/objects/4c/a02b3951a7edd86698d6becdbaf38a2e0f2e30 new file mode 100644 index 0000000..08cf74f Binary files /dev/null and b/.git.orig/objects/4c/a02b3951a7edd86698d6becdbaf38a2e0f2e30 differ diff --git a/.git.orig/objects/56/09cc7941235f99ae9a3ab723fa1c0855c62351 b/.git.orig/objects/56/09cc7941235f99ae9a3ab723fa1c0855c62351 new file mode 100644 index 0000000..f314635 Binary files /dev/null and b/.git.orig/objects/56/09cc7941235f99ae9a3ab723fa1c0855c62351 differ diff --git a/.git.orig/objects/56/1018539ac44bb4e74ab4ab3fae65d93bfde262 b/.git.orig/objects/56/1018539ac44bb4e74ab4ab3fae65d93bfde262 new file mode 100644 index 0000000..59acbb3 Binary files /dev/null and b/.git.orig/objects/56/1018539ac44bb4e74ab4ab3fae65d93bfde262 differ diff --git a/.git.orig/objects/58/b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 b/.git.orig/objects/58/b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 new file mode 100644 index 0000000..52d5cb8 Binary files /dev/null and b/.git.orig/objects/58/b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 differ diff --git a/.git.orig/objects/58/e3843da16dab7e0167bae41fb18d787fd78d1c b/.git.orig/objects/58/e3843da16dab7e0167bae41fb18d787fd78d1c new file mode 100644 index 0000000..a76f292 Binary files /dev/null and b/.git.orig/objects/58/e3843da16dab7e0167bae41fb18d787fd78d1c differ diff --git a/.git.orig/objects/5c/f1b8583f408862afb161ae09fc7634a46f34a7 b/.git.orig/objects/5c/f1b8583f408862afb161ae09fc7634a46f34a7 new file mode 100644 index 0000000..cb62dc7 Binary files /dev/null and b/.git.orig/objects/5c/f1b8583f408862afb161ae09fc7634a46f34a7 differ diff --git a/.git.orig/objects/5f/49882907b36d837a577593928eca824b5547d3 b/.git.orig/objects/5f/49882907b36d837a577593928eca824b5547d3 new file mode 100644 index 0000000..e39ba5c Binary files /dev/null and b/.git.orig/objects/5f/49882907b36d837a577593928eca824b5547d3 differ diff --git a/.git.orig/objects/6c/9f5050a116ece485f0eb8914d98425e049bd92 b/.git.orig/objects/6c/9f5050a116ece485f0eb8914d98425e049bd92 new file mode 100644 index 0000000..dac16f4 Binary files /dev/null and b/.git.orig/objects/6c/9f5050a116ece485f0eb8914d98425e049bd92 differ diff --git a/.git.orig/objects/7a/06bde184c49014feb523e9a54b726df8680fc2 b/.git.orig/objects/7a/06bde184c49014feb523e9a54b726df8680fc2 new file mode 100644 index 0000000..09f5ef0 Binary files /dev/null and b/.git.orig/objects/7a/06bde184c49014feb523e9a54b726df8680fc2 differ diff --git a/.git.orig/objects/7a/dbb168debec28b54ccdaac3a082e52eb78bae7 b/.git.orig/objects/7a/dbb168debec28b54ccdaac3a082e52eb78bae7 new file mode 100644 index 0000000..3e36d29 Binary files /dev/null and b/.git.orig/objects/7a/dbb168debec28b54ccdaac3a082e52eb78bae7 differ diff --git a/.git.orig/objects/85/0db890df9cb03de53e701189b775866254437e b/.git.orig/objects/85/0db890df9cb03de53e701189b775866254437e new file mode 100644 index 0000000..1802e2f Binary files /dev/null and b/.git.orig/objects/85/0db890df9cb03de53e701189b775866254437e differ diff --git a/.git.orig/objects/8f/d882434ce2289e9789684588c0ce48cb1c4657 b/.git.orig/objects/8f/d882434ce2289e9789684588c0ce48cb1c4657 new file mode 100644 index 0000000..1eb3ffa Binary files /dev/null and b/.git.orig/objects/8f/d882434ce2289e9789684588c0ce48cb1c4657 differ diff --git a/.git.orig/objects/a9/3196823aa13eb6bcf524b9fed8bffc97f0a4e9 b/.git.orig/objects/a9/3196823aa13eb6bcf524b9fed8bffc97f0a4e9 new file mode 100644 index 0000000..57c284e Binary files /dev/null and b/.git.orig/objects/a9/3196823aa13eb6bcf524b9fed8bffc97f0a4e9 differ diff --git a/.git.orig/objects/b3/2de3bf69b67f9442f32a615efa5ae8a470ed10 b/.git.orig/objects/b3/2de3bf69b67f9442f32a615efa5ae8a470ed10 new file mode 100644 index 0000000..7854e15 Binary files /dev/null and b/.git.orig/objects/b3/2de3bf69b67f9442f32a615efa5ae8a470ed10 differ diff --git a/.git.orig/objects/b8/92bd45591435a61d4fc57b954841a8bfdd5a81 b/.git.orig/objects/b8/92bd45591435a61d4fc57b954841a8bfdd5a81 new file mode 100644 index 0000000..1e6d69d Binary files /dev/null and b/.git.orig/objects/b8/92bd45591435a61d4fc57b954841a8bfdd5a81 differ diff --git a/.git.orig/objects/b8/c859533235b2d9acf2541de0619342983e2192 b/.git.orig/objects/b8/c859533235b2d9acf2541de0619342983e2192 new file mode 100644 index 0000000..5802440 Binary files /dev/null and b/.git.orig/objects/b8/c859533235b2d9acf2541de0619342983e2192 differ diff --git a/.git.orig/objects/bb/f1d65aa293a49f5da080c0a91594a4c2cfdebf b/.git.orig/objects/bb/f1d65aa293a49f5da080c0a91594a4c2cfdebf new file mode 100644 index 0000000..7acd9a8 Binary files /dev/null and b/.git.orig/objects/bb/f1d65aa293a49f5da080c0a91594a4c2cfdebf differ diff --git a/.git.orig/objects/be/e6119cdeea9728ec3144ccbb18bc1d8a239deb b/.git.orig/objects/be/e6119cdeea9728ec3144ccbb18bc1d8a239deb new file mode 100644 index 0000000..21aa1da Binary files /dev/null and b/.git.orig/objects/be/e6119cdeea9728ec3144ccbb18bc1d8a239deb differ diff --git a/.git.orig/objects/c4/6d55c9446904e699f35a48892de809f4061166 b/.git.orig/objects/c4/6d55c9446904e699f35a48892de809f4061166 new file mode 100644 index 0000000..8bee9a1 Binary files /dev/null and b/.git.orig/objects/c4/6d55c9446904e699f35a48892de809f4061166 differ diff --git a/.git.orig/objects/c4/cb735515e9ed67db62710b6998d8eb9af776db b/.git.orig/objects/c4/cb735515e9ed67db62710b6998d8eb9af776db new file mode 100644 index 0000000..9a3b0e5 Binary files /dev/null and b/.git.orig/objects/c4/cb735515e9ed67db62710b6998d8eb9af776db differ diff --git a/.git.orig/objects/c6/78d7bbcbfe040f850a951876de535b837b13f9 b/.git.orig/objects/c6/78d7bbcbfe040f850a951876de535b837b13f9 new file mode 100644 index 0000000..7490590 Binary files /dev/null and b/.git.orig/objects/c6/78d7bbcbfe040f850a951876de535b837b13f9 differ diff --git a/.git.orig/objects/d9/517e6ccbacc258955b752d68a52014a2d8f223 b/.git.orig/objects/d9/517e6ccbacc258955b752d68a52014a2d8f223 new file mode 100644 index 0000000..eb813cc Binary files /dev/null and b/.git.orig/objects/d9/517e6ccbacc258955b752d68a52014a2d8f223 differ diff --git a/.git.orig/objects/df/27b7e0e94f44b672c0c077dcfd666b38300bcb b/.git.orig/objects/df/27b7e0e94f44b672c0c077dcfd666b38300bcb new file mode 100644 index 0000000..e31322d Binary files /dev/null and b/.git.orig/objects/df/27b7e0e94f44b672c0c077dcfd666b38300bcb differ diff --git a/.git.orig/objects/f6/4965c0271738423a6d10c10be61b2245b628d7 b/.git.orig/objects/f6/4965c0271738423a6d10c10be61b2245b628d7 new file mode 100644 index 0000000..2e38ebe Binary files /dev/null and b/.git.orig/objects/f6/4965c0271738423a6d10c10be61b2245b628d7 differ diff --git a/.git.orig/objects/f6/69686b38ada693234a078c22481a3774e36cf2 b/.git.orig/objects/f6/69686b38ada693234a078c22481a3774e36cf2 new file mode 100644 index 0000000..05e81c9 Binary files /dev/null and b/.git.orig/objects/f6/69686b38ada693234a078c22481a3774e36cf2 differ diff --git a/.git.orig/objects/fc/428bc75ab0198ea2b3ca70cc0311b95381779b b/.git.orig/objects/fc/428bc75ab0198ea2b3ca70cc0311b95381779b new file mode 100644 index 0000000..66c7196 Binary files /dev/null and b/.git.orig/objects/fc/428bc75ab0198ea2b3ca70cc0311b95381779b differ diff --git a/.git.orig/objects/fc/ad7b900f8a691fc0730c61b5befd38aee71e36 b/.git.orig/objects/fc/ad7b900f8a691fc0730c61b5befd38aee71e36 new file mode 100644 index 0000000..beb5748 Binary files /dev/null and b/.git.orig/objects/fc/ad7b900f8a691fc0730c61b5befd38aee71e36 differ
diff --git a/.git.orig/objects/fe/94b91c7bccecf0ce98fc393b914f1de4f05a41 b/.git.orig/objects/fe/94b91c7bccecf0ce98fc393b914f1de4f05a41 new file mode 100644 index 0000000..a3756d9 Binary files /dev/null and b/.git.orig/objects/fe/94b91c7bccecf0ce98fc393b914f1de4f05a41 differ diff --git a/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.idx b/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.idx new file mode 100644 index 0000000..f9fd7f7 Binary files /dev/null and b/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.idx differ diff --git a/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.pack b/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.pack new file mode 100644 index 0000000..add4043 Binary files /dev/null and b/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.pack differ diff --git a/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.rev b/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.rev new file mode 100644 index 0000000..ca51b5d Binary files /dev/null and b/.git.orig/objects/pack/pack-011e412ea3833a4bbedf2c966c4755fc019c8240.rev differ diff --git a/.git.orig/packed-refs b/.git.orig/packed-refs new file mode 100644 index 0000000..d11d854 --- /dev/null +++ b/.git.orig/packed-refs @@ -0,0 +1,11 @@ +# pack-refs with: peeled fully-peeled sorted +093c4d14ea55b702407f5e5459239d61a73eef56 refs/remotes/origin/dependabot/hex/ash_sql-0.1.1-rc.20 +4e9cff586684056eb71ae7acad0fdbd7b5db541c refs/remotes/origin/main +05d43bbd8fd196025f7ed6ea3d240d077def047f refs/tags/v0.1.0 +^98343e266d530d44fc5de48f35147103724c3454 +e387d02fc0b11c8e8b928cd08a931a030e8c779a refs/tags/v0.1.1 +^b4ed3806c4ef51c06c682956f24fb2895a0a7e54 +b3e3b6480c1d2dc54d5b227beef9ddf37897c6a8 refs/tags/v0.1.2-rc.0 +^26f94773c09ec7ef009bf0af8357a6f2c0ab2ad9 +364d98a6d77b2089247dfbcf396fb734f682cccc refs/tags/v0.1.2-rc.1 +^f3b100d07f06808dc327345678dc19b309d767dd diff --git a/.git.orig/refs/heads/main b/.git.orig/refs/heads/main new file mode 100644 index 0000000..93f1202 --- /dev/null +++ b/.git.orig/refs/heads/main @@ -0,0 +1 @@ +58b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 diff --git a/.git.orig/refs/remotes/origin/HEAD b/.git.orig/refs/remotes/origin/HEAD new file mode 100644 index 0000000..4b0a875 --- /dev/null +++ b/.git.orig/refs/remotes/origin/HEAD @@ -0,0 +1 @@ +ref: refs/remotes/origin/main diff --git a/.git.orig/refs/remotes/origin/main b/.git.orig/refs/remotes/origin/main new file mode 100644 index 0000000..93f1202 --- /dev/null +++ b/.git.orig/refs/remotes/origin/main @@ -0,0 +1 @@ +58b07aeebdc1b4a0da7eb94a92e1bc655ec19b50 diff --git a/.git.orig/refs/tags/v0.1.2 b/.git.orig/refs/tags/v0.1.2 new file mode 100644 index 0000000..795b2b9 --- /dev/null +++ b/.git.orig/refs/tags/v0.1.2 @@ -0,0 +1 @@ +5609cc7941235f99ae9a3ab723fa1c0855c62351 diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..7aa6f74 --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, 
race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at zach@zachdaniel.dev. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000..f537454 --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,2 @@ +# Contributing Guidelines +Contributing guidelines can be found in the core project, [ash](https://github.com/ash-project/ash/blob/main/.github/CONTRIBUTING.md) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..1f47341 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,27 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: bug, needs review +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. If you are not sure if the bug is related to `ash` or an extension, log it with [ash](https://github.com/ash-project/ash/issues/new) and we will move it. + +**To Reproduce** +A minimal set of resource definitions and calls that can reproduce the bug. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +** Runtime + - Elixir version + - Erlang version + - OS + - Ash version + - any related extension versions + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..181e583 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,35 @@ +--- +name: Proposal +about: Suggest an idea for this project +title: "" +labels: enhancement, needs review +assignees: "" +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Express the feature either with a change to resource syntax, or with a change to the resource interface** + +For example + +```elixir + attributes do + attribute :foo, :integer, bar: 10 # <- Adding `bar` here would cause + end +``` + +Or + +```elixir + Ash.read(:resource, bar: 10) # <- Adding `bar` here would cause +``` + +**Additional context** +Add any other context or screenshots about the feature request here. 
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..8c13744 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,4 @@ +### Contributor checklist + +- [ ] Bug fixes include regression tests +- [ ] Features include unit/acceptance tests diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..6977f1c --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,6 @@ +version: 2 +updates: +- package-ecosystem: mix + directory: "/" + schedule: + interval: "daily" diff --git a/.github/workflows/elixir.yml b/.github/workflows/elixir.yml new file mode 100644 index 0000000..e015e71 --- /dev/null +++ b/.github/workflows/elixir.yml @@ -0,0 +1,15 @@ +name: CI +on: + push: + tags: + - "v*" + branches: [main] + pull_request: + branches: [main] +jobs: + ash-ci: + uses: ash-project/ash/.github/workflows/ash-ci.yml@main + with: + sqlite: true + secrets: + hex_api_key: ${{ secrets.HEX_API_KEY }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e79954f --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Where third-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Ignore package tarball (built via "mix hex.build"). +ash_sqlite-*.tar + +test_migration_path +test_snapshots_path + +test/test.db +test/test.db-shm +test/test.db-wal diff --git a/.tool-versions b/.tool-versions new file mode 100644 index 0000000..44acbf0 --- /dev/null +++ b/.tool-versions @@ -0,0 +1,2 @@ +erlang 26.0.2 +elixir 1.15.4 diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..711d535 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "cSpell.words": [ + "mapset", + "instr" + ] +} diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..a931968 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,61 @@ +# Change Log + +All notable changes to this project will be documented in this file. +See [Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+
+
+
+## [v0.1.2](https://github.com/ash-project/ash_sqlite/compare/v0.1.2-rc.1...v0.1.2) (2024-05-11)
+
+
+
+
+## [v0.1.2-rc.1](https://github.com/ash-project/ash_sqlite/compare/v0.1.2-rc.0...v0.1.2-rc.1) (2024-05-06)
+
+
+
+
+### Bug Fixes:
+
+* properly scope deletes to the records in question
+
+* update ash_sqlite to get `ilike` behavior fix
+
+### Improvements:
+
+* support `contains` function
+
+## [v0.1.2-rc.0](https://github.com/ash-project/ash_sqlite/compare/v0.1.1...v0.1.2-rc.0) (2024-04-15)
+
+
+
+
+### Bug Fixes:
+
+* reenable mix tasks that we need to call
+
+### Improvements:
+
+* support `mix ash.rollback`
+
+* support Ash 3.0, leverage `ash_sql` package
+
+* fix datetime migration type discovery
+
+## [v0.1.1](https://github.com/ash-project/ash_sqlite/compare/v0.1.0...v0.1.1) (2023-10-12)
+
+
+
+
+### Improvements:
+
+* add `SqliteMigrationDefault`
+
+* support query aggregates
+
+## [v0.1.0](https://github.com/ash-project/ash_sqlite/compare/v0.1.0...v0.1.0) (2023-10-12)
+
+
+### Improvements:
+
+* Port and adjust `AshPostgres` to `AshSqlite`
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..4eb51a5
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Zachary Scott Daniel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c632cb2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,38 @@
+![Logo](https://github.com/ash-project/ash/blob/main/logos/cropped-for-header-black-text.png?raw=true#gh-light-mode-only)
+![Logo](https://github.com/ash-project/ash/blob/main/logos/cropped-for-header-white-text.png?raw=true#gh-dark-mode-only)
+
+[![CI](https://github.com/ash-project/ash_sqlite/actions/workflows/elixir.yml/badge.svg)](https://github.com/ash-project/ash_sqlite/actions/workflows/elixir.yml)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+[![Hex version badge](https://img.shields.io/hexpm/v/ash_sqlite.svg)](https://hex.pm/packages/ash_sqlite)
+[![Hexdocs badge](https://img.shields.io/badge/docs-hexdocs-purple)](https://hexdocs.pm/ash_sqlite)
+
+# AshSqlite
+
+Welcome! `AshSqlite` is the SQLite data layer for [Ash Framework](https://hexdocs.pm/ash).
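+
+## Quick example
+
+To give a feel for the data layer before the guides below, here is a minimal sketch of a resource backed by AshSqlite (the domain, repo, and table names here are illustrative, not part of the library):
+
+```elixir
+defmodule MyApp.Post do
+  use Ash.Resource,
+    domain: MyApp.Blog,
+    data_layer: AshSqlite.DataLayer
+
+  # The sqlite block is the only data-layer-specific configuration a
+  # resource needs: the table to use and the repo to go through.
+  sqlite do
+    table "posts"
+    repo MyApp.Repo
+  end
+
+  attributes do
+    uuid_primary_key :id
+    attribute :title, :string, allow_nil?: false
+  end
+end
+```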
+
+## Tutorials
+
+- [Get Started](documentation/tutorials/getting-started-with-ash-sqlite.md)
+
+## Topics
+
+- [What is AshSqlite?](documentation/topics/about-ash-sqlite/what-is-ash-sqlite.md)
+
+### Resources
+
+- [References](documentation/topics/resources/references.md)
+- [Polymorphic Resources](documentation/topics/resources/polymorphic-resources.md)
+
+### Development
+
+- [Migrations and tasks](documentation/topics/development/migrations-and-tasks.md)
+- [Testing](documentation/topics/development/testing.md)
+
+### Advanced
+
+- [Expressions](documentation/topics/advanced/expressions.md)
+- [Manual Relationships](documentation/topics/advanced/manual-relationships.md)
+
+## Reference
+
+- [AshSqlite.DataLayer DSL](documentation/dsls/DSL:-AshSqlite.DataLayer.md)
diff --git a/config/config.exs b/config/config.exs
new file mode 100644
index 0000000..f81cdd0
--- /dev/null
+++ b/config/config.exs
@@ -0,0 +1,54 @@
+import Config
+
+if Mix.env() == :dev do
+  config :git_ops,
+    mix_project: AshSqlite.MixProject,
+    changelog_file: "CHANGELOG.md",
+    repository_url: "https://github.com/ash-project/ash_sqlite",
+    # Instructs the tool to manage your mix version in your `mix.exs` file
+    # See below for more information
+    manage_mix_version?: true,
+    # Instructs the tool to manage the version in your README.md
+    # Pass in `true` to use `"README.md"` or a string to customize
+    manage_readme_version: [
+      "README.md",
+      "documentation/tutorials/getting-started-with-ash-sqlite.md"
+    ],
+    version_tag_prefix: "v"
+end
+
+if Mix.env() == :test do
+  config :ash, :validate_domain_resource_inclusion?, false
+  config :ash, :validate_domain_config_inclusion?, false
+
+  # SQLite is file-based: no username, hostname or password is involved
+  config :ash_sqlite, AshSqlite.TestRepo,
+    database: "test/test.db",
+    pool: Ecto.Adapters.SQL.Sandbox
+
+  config :ash_sqlite, AshSqlite.TestRepo, migration_primary_key: [name: :id, type: :binary_id]
+
+  config :ash_sqlite, AshSqlite.TestNoSandboxRepo,
+    database: "test/test.db"
+
+  config :ash_sqlite, AshSqlite.TestNoSandboxRepo,
+    migration_primary_key: [name: :id, type: :binary_id]
+
+  # ecto_repos: [AshSqlite.TestRepo, AshSqlite.TestNoSandboxRepo],
+  config :ash_sqlite,
+    ecto_repos: [AshSqlite.TestRepo],
+    ash_domains: [
+      AshSqlite.Test.Domain
+    ]
+
+  config :logger, level: :debug
+end
diff --git a/documentation/dsls/DSL:-AshSqlite.DataLayer.md b/documentation/dsls/DSL:-AshSqlite.DataLayer.md
new file mode 100644
index 0000000..0d68638
--- /dev/null
+++ b/documentation/dsls/DSL:-AshSqlite.DataLayer.md
@@ -0,0 +1,280 @@
+
+# DSL: AshSqlite.DataLayer
+
+A sqlite data layer that leverages Ecto's sqlite capabilities.
+
+
+## sqlite
+Sqlite data layer configuration
+
+
+### Nested DSLs
+ * [custom_indexes](#sqlite-custom_indexes)
+   * index
+ * [custom_statements](#sqlite-custom_statements)
+   * statement
+ * [references](#sqlite-references)
+   * reference
+
+
+### Examples
+```
+sqlite do
+  repo MyApp.Repo
+  table "organizations"
+end
+
+```
+
+
+
+
+### Options
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`repo`](#sqlite-repo){: #sqlite-repo .spark-required} | `atom` | | The repo that will be used to fetch your data. See the `AshSqlite.Repo` documentation for more |
+| [`migrate?`](#sqlite-migrate?){: #sqlite-migrate? 
} | `boolean` | `true` | Whether or not to include this resource in the generated migrations with `mix ash.generate_migrations` | +| [`migration_types`](#sqlite-migration_types){: #sqlite-migration_types } | `keyword` | `[]` | A keyword list of attribute names to the ecto migration type that should be used for that attribute. Only necessary if you need to override the defaults. | +| [`migration_defaults`](#sqlite-migration_defaults){: #sqlite-migration_defaults } | `keyword` | `[]` | A keyword list of attribute names to the ecto migration default that should be used for that attribute. The string you use will be placed verbatim in the migration. Use fragments like `fragment(\\"now()\\")`, or for `nil`, use `\\"nil\\"`. | +| [`base_filter_sql`](#sqlite-base_filter_sql){: #sqlite-base_filter_sql } | `String.t` | | A raw sql version of the base_filter, e.g `representative = true`. Required if trying to create a unique constraint on a resource with a base_filter | +| [`skip_unique_indexes`](#sqlite-skip_unique_indexes){: #sqlite-skip_unique_indexes } | `atom \| list(atom)` | `false` | Skip generating unique indexes when generating migrations | +| [`unique_index_names`](#sqlite-unique_index_names){: #sqlite-unique_index_names } | `list({list(atom), String.t} \| {list(atom), String.t, String.t})` | `[]` | A list of unique index names that could raise errors that are not configured in identities, or an mfa to a function that takes a changeset and returns the list. In the format `{[:affected, :keys], "name_of_constraint"}` or `{[:affected, :keys], "name_of_constraint", "custom error message"}` | +| [`exclusion_constraint_names`](#sqlite-exclusion_constraint_names){: #sqlite-exclusion_constraint_names } | `any` | `[]` | A list of exclusion constraint names that could raise errors. Must be in the format `{:affected_key, "name_of_constraint"}` or `{:affected_key, "name_of_constraint", "custom error message"}` | +| [`identity_index_names`](#sqlite-identity_index_names){: #sqlite-identity_index_names } | `any` | `[]` | A keyword list of identity names to the unique index name that they should use when being managed by the migration generator. | +| [`foreign_key_names`](#sqlite-foreign_key_names){: #sqlite-foreign_key_names } | `list({atom, String.t} \| {String.t, String.t})` | `[]` | A list of foreign keys that could raise errors, or an mfa to a function that takes a changeset and returns a list. In the format: `{:key, "name_of_constraint"}` or `{:key, "name_of_constraint", "custom error message"}` | +| [`migration_ignore_attributes`](#sqlite-migration_ignore_attributes){: #sqlite-migration_ignore_attributes } | `list(atom)` | `[]` | A list of attributes that will be ignored when generating migrations. | +| [`table`](#sqlite-table){: #sqlite-table } | `String.t` | | The table to store and read the resource from. If this is changed, the migration generator will not remove the old table. | +| [`polymorphic?`](#sqlite-polymorphic?){: #sqlite-polymorphic? } | `boolean` | `false` | Declares this resource as polymorphic. See the [polymorphic resources guide](/documentation/topics/resources/polymorphic-resources.md) for more. | + + +## sqlite.custom_indexes +A section for configuring indexes to be created by the migration generator. + +In general, prefer to use `identities` for simple unique constraints. This is a tool to allow +for declaring more complex indexes. 
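+
+As a sketch of the tradeoff (the resource fields here are illustrative): a plain unique constraint is better expressed as an identity, which the migration generator turns into a unique index on its own, while `custom_indexes` earns its keep when you need index-specific options such as a partial index.
+
+```elixir
+# A simple unique constraint: prefer an identity.
+identities do
+  identity :unique_email, [:email]
+end
+
+# A more complex index: a unique partial index via `where`.
+custom_indexes do
+  index [:email], unique: true, where: "archived_at IS NULL"
+end
+```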
+
+
+### Nested DSLs
+ * [index](#sqlite-custom_indexes-index)
+
+
+### Examples
+```
+custom_indexes do
+  index [:column1, :column2], unique: true, where: "thing = TRUE"
+end
+
+```
+
+
+
+
+## sqlite.custom_indexes.index
+```elixir
+index fields
+```
+
+
+Add an index to be managed by the migration generator.
+
+
+
+
+### Examples
+```
+index ["column", "column2"], unique: true, where: "thing = TRUE"
+```
+
+
+
+### Arguments
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`fields`](#sqlite-custom_indexes-index-fields){: #sqlite-custom_indexes-index-fields } | `atom \| String.t \| list(atom \| String.t)` | | The fields to include in the index. |
+### Options
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`name`](#sqlite-custom_indexes-index-name){: #sqlite-custom_indexes-index-name } | `String.t` | | the name of the index. Defaults to "#{table}_#{column}_index". |
+| [`unique`](#sqlite-custom_indexes-index-unique){: #sqlite-custom_indexes-index-unique } | `boolean` | `false` | indicates whether the index should be unique. |
+| [`using`](#sqlite-custom_indexes-index-using){: #sqlite-custom_indexes-index-using } | `String.t` | | configures the index type. |
+| [`where`](#sqlite-custom_indexes-index-where){: #sqlite-custom_indexes-index-where } | `String.t` | | specify conditions for a partial index. |
+| [`message`](#sqlite-custom_indexes-index-message){: #sqlite-custom_indexes-index-message } | `String.t` | | A custom message to use for unique indexes that have been violated |
+| [`include`](#sqlite-custom_indexes-index-include){: #sqlite-custom_indexes-index-include } | `list(String.t)` | | specify fields for a covering index. This is not supported by all databases. For more information on SQLite support, please read the official docs. |
+
+
+
+
+
+### Introspection
+
+Target: `AshSqlite.CustomIndex`
+
+
+## sqlite.custom_statements
+A section for configuring custom statements to be added to migrations.
+
+Changing custom statements may require manual intervention, because Ash can't determine what order they should run
+in (i.e if they depend on table structure that you've added, or vice versa). As such, any `down` statements we run
+for custom statements happen first, and any `up` statements happen last.
+
+Additionally, when changing a custom statement, we must make some assumptions, i.e that we should migrate
+the old structure down using the previously configured `down` and recreate it.
+
+This may not be desired, and so what you may end up doing is simply modifying the old migration and deleting whatever was
+generated by the migration generator. As always: read your migrations after generating them!
+
+
+### Nested DSLs
+ * [statement](#sqlite-custom_statements-statement)
+
+
+### Examples
+```
+custom_statements do
+  # the name is used to detect if you remove or modify the statement
+  statement :post_summaries do
+    up "CREATE VIEW post_summaries AS SELECT id, title FROM posts;"
+    down "DROP VIEW post_summaries;"
+  end
+end
+
+```
+
+
+
+
+## sqlite.custom_statements.statement
+```elixir
+statement name
+```
+
+
+Add a custom statement for migrations.
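+
+One non-obvious option worth calling out is `code?` (listed under Options below): when set, the `up` and `down` strings are placed into the generated migration as Elixir code rather than being wrapped in `execute/1` as SQL. A hedged sketch, using a hypothetical module of your own:
+
+```elixir
+statement :rebuild_search do
+  # with code? true, these strings are emitted verbatim as Elixir in
+  # the generated migration instead of being passed to execute/1 as SQL
+  code? true
+  up "MyApp.Search.rebuild()"
+  down "MyApp.Search.clear()"
+end
+```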
+
+
+
+### Examples
+```
+statement :post_summaries do
+  up "CREATE VIEW post_summaries AS SELECT id, title FROM posts;"
+  down "DROP VIEW post_summaries;"
+end
+
+```
+
+
+
+### Arguments
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`name`](#sqlite-custom_statements-statement-name){: #sqlite-custom_statements-statement-name .spark-required} | `atom` | | The name of the statement, must be unique within the resource |
+### Options
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`up`](#sqlite-custom_statements-statement-up){: #sqlite-custom_statements-statement-up .spark-required} | `String.t` | | How to create the structure of the statement |
+| [`down`](#sqlite-custom_statements-statement-down){: #sqlite-custom_statements-statement-down .spark-required} | `String.t` | | How to tear down the structure of the statement |
+| [`code?`](#sqlite-custom_statements-statement-code?){: #sqlite-custom_statements-statement-code? } | `boolean` | `false` | By default, we place the strings inside of ecto migration's `execute/1` function and assume they are SQL. Use this option if you want to provide custom Elixir code to be placed directly in the migrations |
+
+
+
+
+
+### Introspection
+
+Target: `AshSqlite.Statement`
+
+
+## sqlite.references
+A section for configuring the references (foreign keys) in resource migrations.
+
+This section is only relevant if you are using the migration generator with this resource.
+Otherwise, it has no effect.
+
+
+### Nested DSLs
+ * [reference](#sqlite-references-reference)
+
+
+### Examples
+```
+references do
+  reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
+end
+
+```
+
+
+
+
+### Options
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`polymorphic_on_delete`](#sqlite-references-polymorphic_on_delete){: #sqlite-references-polymorphic_on_delete } | `:delete \| :nilify \| :nothing \| :restrict` | | For polymorphic resources, configures the on_delete behavior of the automatically generated foreign keys to source tables. |
+| [`polymorphic_on_update`](#sqlite-references-polymorphic_on_update){: #sqlite-references-polymorphic_on_update } | `:update \| :nilify \| :nothing \| :restrict` | | For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables. |
+| [`polymorphic_name`](#sqlite-references-polymorphic_name){: #sqlite-references-polymorphic_name } | `String.t` | | For polymorphic resources, configures the name of the automatically generated foreign keys to source tables. |
+
+
+
+## sqlite.references.reference
+```elixir
+reference relationship
+```
+
+
+Configures the reference for a relationship in resource migrations.
+
+Keep in mind that multiple relationships can theoretically involve the same destination and foreign keys.
+In those cases, you only need to configure the `reference` behavior for one of them. Any conflicts will result
+in an error, across this resource and any other resources that share a table with this one. For this reason,
+instead of adding a reference configuration for `:nothing`, it's best to just leave the configuration out, as that
+is the default behavior if *no* relationship anywhere has configured the behavior of that reference.
+
+
+
+
+### Examples
+```
+reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
+```
+
+
+
+### Arguments
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`relationship`](#sqlite-references-reference-relationship){: #sqlite-references-reference-relationship .spark-required} | `atom` | | The relationship to be configured |
+### Options
+
+| Name | Type | Default | Docs |
+|------|------|---------|------|
+| [`ignore?`](#sqlite-references-reference-ignore?){: #sqlite-references-reference-ignore? } | `boolean` | | If set to true, no reference is created for the given relationship. This is useful if you need to define it in some custom way |
+| [`on_delete`](#sqlite-references-reference-on_delete){: #sqlite-references-reference-on_delete } | `:delete \| :nilify \| :nothing \| :restrict` | | What should happen to records of this resource when the referenced record of the *destination* resource is deleted. |
+| [`on_update`](#sqlite-references-reference-on_update){: #sqlite-references-reference-on_update } | `:update \| :nilify \| :nothing \| :restrict` | | What should happen to records of this resource when the referenced destination_attribute of the *destination* record is updated. |
+| [`deferrable`](#sqlite-references-reference-deferrable){: #sqlite-references-reference-deferrable } | `false \| true \| :initially` | `false` | Whether or not the constraint is deferrable. This only affects the migration generator. |
+| [`name`](#sqlite-references-reference-name){: #sqlite-references-reference-name } | `String.t` | | The name of the foreign key to generate in the database. Defaults to `<table>_<source_attribute>_fkey` |
+
+
+
+
+
+### Introspection
+
+Target: `AshSqlite.Reference`
+
+
+
+
+
+
+
+
diff --git a/documentation/topics/about-ash-sqlite/what-is-ash-sqlite.md b/documentation/topics/about-ash-sqlite/what-is-ash-sqlite.md
new file mode 100644
index 0000000..b77be02
--- /dev/null
+++ b/documentation/topics/about-ash-sqlite/what-is-ash-sqlite.md
@@ -0,0 +1,34 @@
+# What is AshSqlite?
+
+AshSqlite is the SQLite `Ash.DataLayer` for [Ash Framework](https://hexdocs.pm/ash). This doesn't have all of the features of [AshPostgres](https://hexdocs.pm/ash_postgres), but it does support most of the features of Ash data layers. The main feature missing is aggregate support.
+
+Use this to persist records in a SQLite table. For example, the resource below would be persisted in a table called `tweets`:
+
+```elixir
+defmodule MyApp.Tweet do
+  use Ash.Resource,
+    data_layer: AshSqlite.DataLayer
+
+  attributes do
+    integer_primary_key :id
+    attribute :text, :string
+  end
+
+  relationships do
+    belongs_to :author, MyApp.User
+  end
+
+  sqlite do
+    table "tweets"
+    repo MyApp.Repo
+  end
+end
+```
+
+The table might look like this:
+
+| id | text | author_id |
+| --- | --------------- | --------- |
+| 1 | "Hello, world!" | 1 |
+
+Creating records would add to the table, destroying records would remove from the table, and updating records would update the table.
diff --git a/documentation/topics/advanced/expressions.md b/documentation/topics/advanced/expressions.md
new file mode 100644
index 0000000..f377eed
--- /dev/null
+++ b/documentation/topics/advanced/expressions.md
@@ -0,0 +1,61 @@
+# Expressions
+
+In addition to the expressions listed in the [Ash expressions guide](https://hexdocs.pm/ash/expressions.html), AshSqlite provides the following expressions.
+
+## Fragments
+
+Fragments allow you to use arbitrary sqlite expressions in your queries.
Fragments can often be an escape hatch to allow you to do things that don't have something officially supported with Ash.
+
+### Examples
+
+#### Simple expressions
+
+```elixir
+fragment("? / ?", points, count)
+```
+
+#### Calling functions
+
+```elixir
+fragment("random()")
+```
+
+#### Using entire queries
+
+```elixir
+fragment("points > (SELECT SUM(points) FROM games WHERE user_id = ? AND id != ?)", user_id, id)
+```
+
+> ### a last resort {: .warning}
+>
+> Using entire queries as shown above is a last resort, but can sometimes be the best way to accomplish a given task.
+
+#### In calculations
+
+```elixir
+calculations do
+  calculate :lower_name, :string, expr(
+    fragment("LOWER(?)", name)
+  )
+end
+```
+
+#### In migrations
+
+```elixir
+create table(:managers, primary_key: false) do
+  add :id, :uuid, null: false, default: fragment("lower(hex(randomblob(16)))"), primary_key: true
+end
+```
+
+## Like
+
+These wrap the SQLite builtin `LIKE` operator.
+
+Please be aware, these match _patterns_ not raw text. Use `contains/1` if you want to match text without supporting patterns, i.e `%` and `_` have semantic meaning!
+
+For example:
+
+```elixir
+Ash.Query.filter(User, like(name, "%obo%")) # name contains obo anywhere in the string (note that SQLite's LIKE is case-insensitive for ASCII characters by default)
+```
diff --git a/documentation/topics/advanced/manual-relationships.md b/documentation/topics/advanced/manual-relationships.md
new file mode 100644
index 0000000..ad431cf
--- /dev/null
+++ b/documentation/topics/advanced/manual-relationships.md
@@ -0,0 +1,87 @@
+# Join Manual Relationships
+
+See [Defining Manual Relationships](https://hexdocs.pm/ash/defining-manual-relationships.html) for an idea of manual relationships in general.
+Manual relationships allow for expressing complex/non-typical relationships between resources in a standard way.
+Individual data layers may interact with manual relationships in their own way, so see their corresponding guides.
+
+## Example
+
+```elixir
+# in the resource
+
+relationships do
+  has_many :tickets_above_threshold, Helpdesk.Support.Ticket do
+    manual Helpdesk.Support.Ticket.Relationships.TicketsAboveThreshold
+  end
+end
+
+# implementation
+defmodule Helpdesk.Support.Ticket.Relationships.TicketsAboveThreshold do
+  use Ash.Resource.ManualRelationship
+  use AshSqlite.ManualRelationship
+
+  require Ash.Query
+  require Ecto.Query
+
+  def load(records, _opts, %{query: query, actor: actor, authorize?: authorize?}) do
+    # Use existing records to limit results
+    rep_ids = Enum.map(records, & &1.id)
+    # Using Ash to get the destination records is ideal, so you can authorize access like normal,
+    # but if you need to use a raw Ecto query here, you can, as long as you return the right structure.
+
+    {:ok,
+     query
+     |> Ash.Query.filter(representative_id in ^rep_ids)
+     |> Ash.Query.filter(priority > representative.priority_threshold)
+     |> Helpdesk.Support.read!(actor: actor, authorize?: authorize?)
+     # Return the items grouped by the primary key of the source, i.e representative.id => [...tickets above threshold]
+     |> Enum.group_by(& &1.representative_id)}
+  end
+
+  # query is the "source" query that is being built.
+
+  # _opts are options provided to the manual relationship, i.e `{Manual, opt: :val}`
+
+  # current_binding is what the source of the relationship is bound to. Access fields with `as(^current_binding).field`
+
+  # as_binding is the binding that your join should create.
When you join, make sure you say `as: ^as_binding` on the
+  # part of the query that represents the destination of the relationship
+
+  # type is `:inner` or `:left`.
+  # destination_query is what you should join to, to add the destination to the query, i.e `join: dest in ^destination_query`
+  def ash_sqlite_join(query, _opts, current_binding, as_binding, :inner, destination_query) do
+    {:ok,
+     Ecto.Query.from(_ in query,
+       join: dest in ^destination_query,
+       as: ^as_binding,
+       on: dest.representative_id == as(^current_binding).id,
+       on: dest.priority > as(^current_binding).priority_threshold
+     )}
+  end
+
+  def ash_sqlite_join(query, _opts, current_binding, as_binding, :left, destination_query) do
+    {:ok,
+     Ecto.Query.from(_ in query,
+       left_join: dest in ^destination_query,
+       as: ^as_binding,
+       on: dest.representative_id == as(^current_binding).id,
+       on: dest.priority > as(^current_binding).priority_threshold
+     )}
+  end
+
+  # _opts are options provided to the manual relationship, i.e `{Manual, opt: :val}`
+
+  # current_binding is what the source of the relationship is bound to. Access fields with `parent_as(^current_binding).field`
+
+  # as_binding is the binding that has already been created for your join. Access fields on it via `as(^as_binding)`
+
+  # destination_query is what you should use as the basis of your query
+  def ash_sqlite_subquery(_opts, current_binding, as_binding, destination_query) do
+    {:ok,
+     Ecto.Query.from(_ in destination_query,
+       where: parent_as(^current_binding).id == as(^as_binding).representative_id,
+       where: as(^as_binding).priority > parent_as(^current_binding).priority_threshold
+     )}
+  end
+end
+```
diff --git a/documentation/topics/development/migrations-and-tasks.md b/documentation/topics/development/migrations-and-tasks.md
new file mode 100644
index 0000000..af9cd83
--- /dev/null
+++ b/documentation/topics/development/migrations-and-tasks.md
@@ -0,0 +1,98 @@
+# Migrations
+
+## Tasks
+
+Ash comes with its own tasks, and AshSqlite exposes lower level tasks that you can use if necessary. This guide shows the process using `ash.*` tasks, and the `ash_sqlite.*` tasks are illustrated at the bottom.
+
+## Basic Workflow
+
+- Make resource changes
+- Run `mix ash.codegen --name add_a_combobulator` to generate migrations and resource snapshots
+- Run `mix ash.migrate` to run those migrations
+
+For more information on generating migrations, run `mix help ash_sqlite.generate_migrations` (the underlying task that is called by `mix ash.codegen`)
+
+### Regenerating Migrations
+
+Often, you will run into a situation where you want to make a slight change to a resource after you've already generated and run migrations. If you are using git and would like to undo those changes, then regenerate the migrations, this script may prove useful:
+
+```bash
+#!/bin/bash
+
+# Get count of untracked migrations
+N_MIGRATIONS=$(git ls-files --others priv/repo/migrations | wc -l)
+
+# Rollback untracked migrations
+mix ash_sqlite.rollback -n $N_MIGRATIONS
+
+# Delete untracked migrations and snapshots
+git ls-files --others priv/repo/migrations | xargs rm
+git ls-files --others priv/resource_snapshots | xargs rm
+
+# Regenerate migrations
+mix ash.codegen --name $1
+
+# Run migrations if flag
+if echo $* | grep -e "-m" -q
+then
+  mix ash.migrate
+fi
+```
+
+After saving this file to something like `regen.sh`, make it executable with `chmod +x regen.sh`. Now you can run it with `./regen.sh name_of_operation`.
If you would like the migrations to automatically run after regeneration, add the `-m` flag: `./regen.sh name_of_operation -m`.
+
+## Multiple Repos
+
+If you are using multiple repos, you will likely need to use `mix ecto.migrate` and manage it separately for each repo, as the options would
+be applied to both repos, which wouldn't make sense.
+
+## Running Migrations in Production
+
+Define a module similar to the following:
+
+```elixir
+defmodule MyApp.Release do
+  @moduledoc """
+  Houses tasks that need to be executed in the released application (because mix is not present in releases).
+  """
+  @app :my_app
+  def migrate do
+    load_app()
+
+    for repo <- repos() do
+      {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
+    end
+  end
+
+  def rollback(repo, version) do
+    load_app()
+    {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
+  end
+
+  defp repos do
+    domains()
+    |> Enum.flat_map(fn domain ->
+      domain
+      |> Ash.Domain.Info.resources()
+      |> Enum.map(&AshSqlite.DataLayer.Info.repo/1)
+    end)
+    |> Enum.uniq()
+  end
+
+  defp domains do
+    Application.fetch_env!(:my_app, :ash_domains)
+  end
+
+  defp load_app do
+    Application.load(@app)
+  end
+end
+```
+
+# AshSqlite-specific tasks
+
+- `mix ash_sqlite.generate_migrations`
+- `mix ash_sqlite.create`
+- `mix ash_sqlite.migrate`
+- `mix ash_sqlite.rollback`
+- `mix ash_sqlite.drop`
diff --git a/documentation/topics/development/testing.md b/documentation/topics/development/testing.md
new file mode 100644
index 0000000..772c887
--- /dev/null
+++ b/documentation/topics/development/testing.md
@@ -0,0 +1,11 @@
+# Testing With SQLite
+
+Testing resources with SQLite generally requires passing `async?: false` to
+your tests, due to `SQLite`'s limitation of having a single write transaction
+open at any one time.
+
+This should be coupled with the following configuration, to make sure that Ash does not spawn any tasks:
+
+```elixir
+config :ash, :disable_async?, true
+```
diff --git a/documentation/topics/resources/polymorphic-resources.md b/documentation/topics/resources/polymorphic-resources.md
new file mode 100644
index 0000000..2616fe9
--- /dev/null
+++ b/documentation/topics/resources/polymorphic-resources.md
@@ -0,0 +1,85 @@
+# Polymorphic Resources
+
+To support leveraging the same resource backed by multiple tables (useful for things like polymorphic associations), AshSqlite supports setting the `data_layer.table` context for a given resource. For this example, let's assume that you have a `MyApp.Post` resource and a `MyApp.Comment` resource. For each of those resources, users can submit `reactions`. However, you want a separate table for `post_reactions` and `comment_reactions`. You could accomplish that like so:
+
+```elixir
+defmodule MyApp.Reaction do
+  use Ash.Resource,
+    domain: MyApp.Domain,
+    data_layer: AshSqlite.DataLayer
+
+  sqlite do
+    polymorphic? true # Without this, `table` is a required configuration
+  end
+
+  attributes do
+    attribute(:resource_id, :uuid)
+  end
+
+  ...
+end
+```
+
+Then, in your related resources, you set the table context like so:
+
+```elixir
+defmodule MyApp.Post do
+  use Ash.Resource,
+    domain: MyApp.Domain,
+    data_layer: AshSqlite.DataLayer
+
+  ...
+
+  relationships do
+    has_many :reactions, MyApp.Reaction,
+      relationship_context: %{data_layer: %{table: "post_reactions"}},
+      destination_attribute: :resource_id
+  end
+end
+
+defmodule MyApp.Comment do
+  use Ash.Resource,
+    domain: MyApp.Domain,
+    data_layer: AshSqlite.DataLayer
+
+  ...
+
+  relationships do
+    has_many :reactions, MyApp.Reaction,
+      relationship_context: %{data_layer: %{table: "comment_reactions"}},
+      destination_attribute: :resource_id
+  end
+end
+```
+
+With this, when loading or editing related data, Ash will automatically set that context.
+For managing related data, see `Ash.Changeset.manage_relationship/4` and other relationship functions
+in `Ash.Changeset`.
+
+## Table specific actions
+
+To make actions use a specific table, you can use the `set_context` query preparation/change.
+
+For example:
+
+```elixir
+defmodule MyApp.Reaction do
+  actions do
+    read :for_comments do
+      prepare set_context(%{data_layer: %{table: "comment_reactions"}})
+    end
+
+    read :for_posts do
+      prepare set_context(%{data_layer: %{table: "post_reactions"}})
+    end
+  end
+end
+```
+
+## Migrations
+
+When a migration is marked as `polymorphic? true`, the migration generator will look at
+all resources that are related to it, that set the `%{data_layer: %{table: "table"}}` context.
+For each of those, a migration is generated/managed automatically. This means that adding reactions
+to a new resource is as easy as adding the relationship and table context, and then running
+`mix ash_sqlite.generate_migrations`.
diff --git a/documentation/topics/resources/references.md b/documentation/topics/resources/references.md
new file mode 100644
index 0000000..ceecb61
--- /dev/null
+++ b/documentation/topics/resources/references.md
@@ -0,0 +1,23 @@
+# References
+
+To configure the foreign keys on a resource, we use the `references` block.
+
+For example:
+
+```elixir
+references do
+  reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
+end
+```
+
+## Important
+
+No resource logic is applied with these operations! No authorization rules or validations take place, and no notifications are issued. This operation happens *directly* in the database.
+
+## Nothing vs Restrict
+
+The difference between `:nothing` and `:restrict` is subtle and, if you are unsure, choose `:nothing` (the default behavior). `:restrict` will prevent the deletion from happening *before* the end of the database transaction, whereas `:nothing` allows the transaction to complete before doing so. This allows for things like updating or deleting the destination row and *then* updating or deleting the reference (as long as you are in a transaction).
+
+## On Delete
+
+This option is called `on_delete`, instead of `on_destroy`, because it is hooking into the database level deletion, *not* a `destroy` action in your resource.
diff --git a/documentation/tutorials/getting-started-with-ash-sqlite.md b/documentation/tutorials/getting-started-with-ash-sqlite.md
new file mode 100644
index 0000000..fc428bc
--- /dev/null
+++ b/documentation/tutorials/getting-started-with-ash-sqlite.md
@@ -0,0 +1,277 @@
+# Getting Started With AshSqlite
+
+## Goals
+
+In this guide we will:
+
+1. Set up AshSqlite, which includes setting up [Ecto](https://hexdocs.pm/ecto/Ecto.html)
+2. Add AshSqlite to the resources created in [the Ash getting started guide](https://hexdocs.pm/ash/get-started.html)
+3. Show how the various features of AshSqlite can help you work quickly and cleanly against a sqlite database
+4. Highlight some of the more advanced features you can use when using AshSqlite.
+5.
Point you to additional resources you may need on your journey
+
+## Requirements
+
+- A working SQLite installation
+- If you would like to follow along, you will need to begin with [the Ash getting started guide](https://hexdocs.pm/ash/get-started.html)
+
+## Steps
+
+### Add AshSqlite
+
+Add the `:ash_sqlite` dependency to your application
+
+`{:ash_sqlite, "~> 0.1.2"}`
+
+Add `:ash_sqlite` to your `.formatter.exs` file
+
+```elixir
+[
+  # import the formatter rules from `:ash_sqlite`
+  import_deps: [..., :ash_sqlite],
+  inputs: [...]
+]
+```
+
+### Create and configure your Repo
+
+Create `lib/helpdesk/repo.ex` with the following contents. `AshSqlite.Repo` is a thin wrapper around `Ecto.Repo`, so see its documentation for how to use it if you need to use it directly. For standard Ash usage, all you will need to do is configure your resources to use your repo.
+
+```elixir
+# in lib/helpdesk/repo.ex
+
+defmodule Helpdesk.Repo do
+  use AshSqlite.Repo, otp_app: :helpdesk
+end
+```
+
+Next we will need to create configuration files for various environments. Run the following to create the configuration files we need.
+
+```bash
+mkdir -p config
+touch config/config.exs
+touch config/dev.exs
+touch config/runtime.exs
+touch config/test.exs
+```
+
+Place the following contents in those files, adjusting the database file paths as needed. For most conventional installations this will work out of the box. If you've followed other guides before this one, they may have had you create these files already, so just make sure these contents are there.
+
+```elixir
+# in config/config.exs
+import Config
+
+# This should already have been added in the first
+# getting started guide
+config :helpdesk,
+  ash_domains: [Helpdesk.Support]
+
+config :helpdesk,
+  ecto_repos: [Helpdesk.Repo]
+
+# Import environment specific config. This must remain at the bottom
+# of this file so it overrides the configuration defined above.
+import_config "#{config_env()}.exs"
+```
+
+```elixir
+# in config/dev.exs
+
+import Config
+
+# Configure your database
+config :helpdesk, Helpdesk.Repo,
+  database: Path.join(__DIR__, "../path/to/your.db"),
+  show_sensitive_data_on_connection_error: true,
+  pool_size: 10
+```
+
+```elixir
+# in config/runtime.exs
+
+import Config
+
+if config_env() == :prod do
+  config :helpdesk, Helpdesk.Repo,
+    pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
+end
+```
+
+```elixir
+# in config/test.exs
+
+import Config
+
+# Configure your database
+#
+# The MIX_TEST_PARTITION environment variable can be used
+# to provide built-in test partitioning in CI environment.
+# Run `mix help test` for more information.
+config :helpdesk, Helpdesk.Repo,
+  database: Path.join(__DIR__, "../path/to/your#{System.get_env("MIX_TEST_PARTITION")}.db"),
+  pool_size: 10
+```
+
+And finally, add the repo to your application
+
+```elixir
+# in lib/helpdesk/application.ex
+
+  def start(_type, _args) do
+    children = [
+      # Starts a worker by calling: Helpdesk.Worker.start_link(arg)
+      # {Helpdesk.Worker, arg}
+      Helpdesk.Repo
+    ]
+
+    ...
+```
+
+### Add AshSqlite to our resources
+
+Now we can add the data layer to our resources. The basic configuration for a resource requires the `d:AshSqlite.sqlite|table` and the `d:AshSqlite.sqlite|repo`.
+
+```elixir
+# in lib/helpdesk/support/resources/ticket.ex
+
+  use Ash.Resource,
+    domain: MyApp.Domain,
+    data_layer: AshSqlite.DataLayer
+
+  sqlite do
+    table "tickets"
+    repo Helpdesk.Repo
+  end
+```
+
+```elixir
+# in lib/helpdesk/support/resources/representative.ex
+
+  use Ash.Resource,
+    domain: MyApp.Domain,
+    data_layer: AshSqlite.DataLayer
+
+  sqlite do
+    table "representatives"
+    repo Helpdesk.Repo
+  end
+```
+
+### Create the database and tables
+
+First, we'll create the database with `mix ash_sqlite.create`.
+
+Then we will generate database migrations. This is one of the many ways that AshSqlite can save time and reduce complexity.
+
+```bash
+mix ash_sqlite.generate_migrations --name add_tickets_and_representatives
+```
+
+If you are unfamiliar with database migrations, it is worth getting a rough idea of what they are and how they work. See the links at the bottom of this guide for more. A rough overview of how migrations work is that each time you need to make changes to your database, they are saved as small, reproducible scripts that can be applied in order. This is necessary both for clean deploys as well as working with multiple developers making changes to the structure of a single database.
+
+Typically, you need to write these by hand. AshSqlite, however, will store snapshots each time you run the command to generate migrations and will figure out what migrations need to be created.
+
+You should always look at the generated migrations to ensure that they look correct. Do so now by looking at the generated file in `priv/repo/migrations`.
+
+Finally, we will create the local database and apply the generated migrations:
+
+```bash
+mix ash_sqlite.create
+mix ash_sqlite.migrate
+```
+
+### Try it out
+
+And now we're ready to try it out! Run the following in iex:
+
+Let's create some data. We'll make a representative and give them some open and some closed tickets.
+
+```elixir
+require Ash.Query
+
+representative = (
+  Helpdesk.Support.Representative
+  |> Ash.Changeset.for_create(:create, %{name: "Joe Armstrong"})
+  |> Helpdesk.Support.create!()
+)
+
+for i <- 0..5 do
+  ticket =
+    Helpdesk.Support.Ticket
+    |> Ash.Changeset.for_create(:open, %{subject: "Issue #{i}"})
+    |> Helpdesk.Support.create!()
+    |> Ash.Changeset.for_update(:assign, %{representative_id: representative.id})
+    |> Helpdesk.Support.update!()
+
+  if rem(i, 2) == 0 do
+    ticket
+    |> Ash.Changeset.for_update(:close)
+    |> Helpdesk.Support.update!()
+  end
+end
+```
+
+And now we can read that data. You should see some debug logs that show the SQL queries AshSqlite is generating.
+
+```elixir
+require Ash.Query
+
+# Show the tickets where the subject equals "foobar"
+Helpdesk.Support.Ticket
+|> Ash.Query.filter(subject == "foobar")
+|> Helpdesk.Support.read!()
+```
+
+```elixir
+require Ash.Query
+
+# Show the tickets that are closed and their subject does not equal "barbaz"
+Helpdesk.Support.Ticket
+|> Ash.Query.filter(status == :closed and not(subject == "barbaz"))
+|> Helpdesk.Support.read!()
+```
+
+And, naturally, now that we are storing this in sqlite, this database is persisted even if we stop/start our application. The nice thing, however, is that this was the _exact_ same code that we ran against our resources when they were backed by ETS.
+
+### Calculations
+
+Calculations can be pushed down into SQL using expressions.
+
+For example, we can determine the percentage of tickets that are open:
+
+```elixir
+# in lib/helpdesk/support/resources/representative.ex
+
+  calculations do
+    calculate :percent_open, :float, expr(open_tickets / total_tickets)
+  end
+```
+
+Calculations can be loaded.
+
+```elixir
+require Ash.Query
+
+Helpdesk.Support.Representative
+|> Ash.Query.filter(percent_open > 0.25)
+|> Ash.Query.sort(:percent_open)
+|> Ash.Query.load(:percent_open)
+|> Helpdesk.Support.read!()
+```
+
+### Rich Configuration Options
+
+Take a look at the DSL documentation for more information on what you can configure. You can add custom indexes, configure the behavior of foreign keys, and more!
+
+### Deployment
+
+When deploying, you will need to ensure that the file you are using in production is persisted in some way (probably, unless you want it to disappear whenever your deployed system restarts). For example with fly.io this might mean adding a volume to your deployment.
+
+### What next?
+
+- Check out the data layer docs: `AshSqlite.DataLayer`
+
+- [Ecto's documentation](https://hexdocs.pm/ecto/Ecto.html). AshSqlite (and much of Ash itself) is made possible by the amazing Ecto. If you find yourself looking for escape hatches when using Ash or ways to work directly with your database, you will want to know how Ecto works. Ash and AshSqlite intentionally do not hide Ecto, and in fact encourage its use whenever you need an escape hatch.
+
+- [Ecto's Migration documentation](https://hexdocs.pm/ecto_sql/Ecto.Migration.html) to read more about migrations. Even with the ash_sqlite migration generator, you will very likely need to modify your own migrations some day.
diff --git a/lib/ash_sqlite.ex b/lib/ash_sqlite.ex
new file mode 100644
index 0000000..e09956f
--- /dev/null
+++ b/lib/ash_sqlite.ex
@@ -0,0 +1,7 @@
+defmodule AshSqlite do
+  @moduledoc """
+  The AshSqlite extension gives you tools to map a resource to a sqlite database table.
+
+  For more, check out the [getting started guide](/documentation/tutorials/getting-started-with-ash-sqlite.md)
+  """
+end
diff --git a/lib/custom_extension.ex b/lib/custom_extension.ex
new file mode 100644
index 0000000..62cdaad
--- /dev/null
+++ b/lib/custom_extension.ex
@@ -0,0 +1,20 @@
+defmodule AshSqlite.CustomExtension do
+  @moduledoc """
+  A custom extension implementation.
+  """
+
+  @callback install(version :: integer) :: String.t()
+
+  @callback uninstall(version :: integer) :: String.t()
+
+  defmacro __using__(name: name, latest_version: latest_version) do
+    quote do
+      @behaviour AshSqlite.CustomExtension
+
+      @extension_name unquote(name)
+      @extension_latest_version unquote(latest_version)
+
+      def extension, do: {@extension_name, @extension_latest_version, &install/1, &uninstall/1}
+    end
+  end
+end
diff --git a/lib/custom_index.ex b/lib/custom_index.ex
new file mode 100644
index 0000000..49bb093
--- /dev/null
+++ b/lib/custom_index.ex
@@ -0,0 +1,109 @@
+defmodule AshSqlite.CustomIndex do
+  @moduledoc "Represents a custom index on the table backing a resource"
+  @fields [
+    :table,
+    :fields,
+    :name,
+    :unique,
+    :using,
+    :where,
+    :include,
+    :message
+  ]
+
+  defstruct @fields
+
+  def fields, do: @fields
+
+  @schema [
+    fields: [
+      type: {:wrap_list, {:or, [:atom, :string]}},
+      doc: "The fields to include in the index."
+    ],
+    name: [
+      type: :string,
+      doc: "the name of the index. Defaults to \"\#\{table\}_\#\{column\}_index\"."
+    ],
+    unique: [
+      type: :boolean,
+      doc: "indicates whether the index should be unique.",
+      default: false
+    ],
+    using: [
+      type: :string,
+      doc: "configures the index type."
+    ],
+    where: [
+      type: :string,
+      doc: "specify conditions for a partial index."
+    ],
+    message: [
+      type: :string,
+      doc: "A custom message to use for unique indexes that have been violated"
+    ],
+    include: [
+      type: {:list, :string},
+      doc:
+        "specify fields for a covering index. This is not supported by all databases. For more information on SQLite support, please read the official docs."
+    ]
+  ]
+
+  def schema, do: @schema
+
+  # sobelow_skip ["DOS.StringToAtom"]
+  def transform(%__MODULE__{fields: fields} = index) do
+    index = %{
+      index
+      | fields:
+          Enum.map(fields, fn field ->
+            if is_atom(field) do
+              field
+            else
+              String.to_atom(field)
+            end
+          end)
+    }
+
+    cond do
+      index.name ->
+        if Regex.match?(~r/^[0-9a-zA-Z_]+$/, index.name) do
+          {:ok, index}
+        else
+          {:error,
+           "Custom index name #{index.name} is not valid. Must have letters, numbers and underscores only"}
+        end
+
+      mismatched_field =
+        Enum.find(index.fields, fn field ->
+          !Regex.match?(~r/^[0-9a-zA-Z_]+$/, to_string(field))
+        end) ->
+        {:error,
+         """
+         Custom index field #{mismatched_field} contains invalid index name characters.
+
+         A name must be set manually, i.e
+
+         `name: "your_desired_index_name"`
+
+         Index names must have letters, numbers and underscores only
+         """}
+
+      true ->
+        {:ok, index}
+    end
+  end
+
+  def name(_resource, %{name: name}) when is_binary(name) do
+    name
+  end
+
+  # sobelow_skip ["DOS.StringToAtom"]
+  def name(table, %{fields: fields}) do
+    [table, fields, "index"]
+    |> List.flatten()
+    |> Enum.map(&to_string(&1))
+    |> Enum.map(&String.replace(&1, ~r"[^\w_]", "_"))
+    |> Enum.map_join("_", &String.replace_trailing(&1, "_", ""))
+    |> String.to_atom()
+  end
+end
diff --git a/lib/data_layer.ex b/lib/data_layer.ex
new file mode 100644
index 0000000..3502fcc
--- /dev/null
+++ b/lib/data_layer.ex
@@ -0,0 +1,1589 @@
+defmodule AshSqlite.DataLayer do
+  @index %Spark.Dsl.Entity{
+    name: :index,
+    describe: """
+    Add an index to be managed by the migration generator.
+    """,
+    examples: [
+      "index [\"column\", \"column2\"], unique: true, where: \"thing = TRUE\""
+    ],
+    target: AshSqlite.CustomIndex,
+    schema: AshSqlite.CustomIndex.schema(),
+    transform: {AshSqlite.CustomIndex, :transform, []},
+    args: [:fields]
+  }
+
+  @custom_indexes %Spark.Dsl.Section{
+    name: :custom_indexes,
+    describe: """
+    A section for configuring indexes to be created by the migration generator.
+
+    In general, prefer to use `identities` for simple unique constraints. This is a tool to allow
+    for declaring more complex indexes.
+    """,
+    examples: [
+      """
+      custom_indexes do
+        index [:column1, :column2], unique: true, where: "thing = TRUE"
+      end
+      """
+    ],
+    entities: [
+      @index
+    ]
+  }
+
+  @statement %Spark.Dsl.Entity{
+    name: :statement,
+    describe: """
+    Add a custom statement for migrations.
+    """,
+    examples: [
+      """
+      statement :post_summaries do
+        up "CREATE VIEW post_summaries AS SELECT id, title FROM posts;"
+        down "DROP VIEW post_summaries;"
+      end
+      """
+    ],
+    target: AshSqlite.Statement,
+    schema: AshSqlite.Statement.schema(),
+    args: [:name]
+  }
+
+  @custom_statements %Spark.Dsl.Section{
+    name: :custom_statements,
+    describe: """
+    A section for configuring custom statements to be added to migrations.
+
+    Changing custom statements may require manual intervention, because Ash can't determine what order they should run
+    in (i.e if they depend on table structure that you've added, or vice versa). As such, any `down` statements we run
+    for custom statements happen first, and any `up` statements happen last.
+
+    Additionally, when changing a custom statement, we must make some assumptions, i.e that we should migrate
+    the old structure down using the previously configured `down` and recreate it.
+
+    This may not be desired, and so what you may end up doing is simply modifying the old migration and deleting whatever was
+    generated by the migration generator. As always: read your migrations after generating them!
+    """,
+    examples: [
+      """
+      custom_statements do
+        # the name is used to detect if you remove or modify the statement
+        statement :post_summaries do
+          up "CREATE VIEW post_summaries AS SELECT id, title FROM posts;"
+          down "DROP VIEW post_summaries;"
+        end
+      end
+      """
+    ],
+    entities: [
+      @statement
+    ]
+  }
+
+  @reference %Spark.Dsl.Entity{
+    name: :reference,
+    describe: """
+    Configures the reference for a relationship in resource migrations.
+
+    Keep in mind that multiple relationships can theoretically involve the same destination and foreign keys.
+    In those cases, you only need to configure the `reference` behavior for one of them. Any conflicts will result
+    in an error, across this resource and any other resources that share a table with this one. For this reason,
+    instead of adding a reference configuration for `:nothing`, it's best to just leave the configuration out, as that
+    is the default behavior if *no* relationship anywhere has configured the behavior of that reference.
+    """,
+    examples: [
+      "reference :post, on_delete: :delete, on_update: :update, name: \"comments_to_posts_fkey\""
+    ],
+    args: [:relationship],
+    target: AshSqlite.Reference,
+    schema: AshSqlite.Reference.schema()
+  }
+
+  @references %Spark.Dsl.Section{
+    name: :references,
+    describe: """
+    A section for configuring the references (foreign keys) in resource migrations.
+
+    This section is only relevant if you are using the migration generator with this resource.
+    Otherwise, it has no effect.
+    """,
+    examples: [
+      """
+      references do
+        reference :post, on_delete: :delete, on_update: :update, name: "comments_to_posts_fkey"
+      end
+      """
+    ],
+    entities: [@reference],
+    schema: [
+      polymorphic_on_delete: [
+        type: {:one_of, [:delete, :nilify, :nothing, :restrict]},
+        doc:
+          "For polymorphic resources, configures the on_delete behavior of the automatically generated foreign keys to source tables."
+      ],
+      polymorphic_on_update: [
+        type: {:one_of, [:update, :nilify, :nothing, :restrict]},
+        doc:
+          "For polymorphic resources, configures the on_update behavior of the automatically generated foreign keys to source tables."
+      ],
+      polymorphic_name: [
+        type: :string,
+        doc:
+          "For polymorphic resources, configures the name of the automatically generated foreign keys to source tables."
+      ]
+    ]
+  }
+
+  @sqlite %Spark.Dsl.Section{
+    name: :sqlite,
+    describe: """
+    Sqlite data layer configuration
+    """,
+    sections: [
+      @custom_indexes,
+      @custom_statements,
+      @references
+    ],
+    modules: [
+      :repo
+    ],
+    examples: [
+      """
+      sqlite do
+        repo MyApp.Repo
+        table "organizations"
+      end
+      """
+    ],
+    schema: [
+      repo: [
+        type: :atom,
+        required: true,
+        doc:
+          "The repo that will be used to fetch your data. See the `AshSqlite.Repo` documentation for more"
+      ],
+      migrate?: [
+        type: :boolean,
+        default: true,
+        doc:
+          "Whether or not to include this resource in the generated migrations with `mix ash.generate_migrations`"
+      ],
+      migration_types: [
+        type: :keyword_list,
+        default: [],
+        doc:
+          "A keyword list of attribute names to the ecto migration type that should be used for that attribute. Only necessary if you need to override the defaults."
+      ],
+      migration_defaults: [
+        type: :keyword_list,
+        default: [],
+        doc: """
+        A keyword list of attribute names to the ecto migration default that should be used for that attribute. The string you use will be placed verbatim in the migration. Use fragments like `fragment(\\\\"now()\\\\")`, or for `nil`, use `\\\\"nil\\\\"`.
+        """
+      ],
+      base_filter_sql: [
+        type: :string,
+        doc:
+          "A raw sql version of the base_filter, e.g `representative = true`. Required if trying to create a unique constraint on a resource with a base_filter"
+      ],
+      skip_unique_indexes: [
+        type: {:wrap_list, :atom},
+        default: false,
+        doc: "Skip generating unique indexes when generating migrations"
+      ],
+      unique_index_names: [
+        type:
+          {:list,
+           {:or,
+            [{:tuple, [{:list, :atom}, :string]}, {:tuple, [{:list, :atom}, :string, :string]}]}},
+        default: [],
+        doc: """
+        A list of unique index names that could raise errors that are not configured in identities, or an mfa to a function that takes a changeset and returns the list. In the format `{[:affected, :keys], "name_of_constraint"}` or `{[:affected, :keys], "name_of_constraint", "custom error message"}`
+        """
+      ],
+      exclusion_constraint_names: [
+        type: :any,
+        default: [],
+        doc: """
+        A list of exclusion constraint names that could raise errors. Must be in the format `{:affected_key, "name_of_constraint"}` or `{:affected_key, "name_of_constraint", "custom error message"}`
+        """
+      ],
+      identity_index_names: [
+        type: :any,
+        default: [],
+        doc: """
+        A keyword list of identity names to the unique index name that they should use when being managed by the migration generator.
+ """ + ], + foreign_key_names: [ + type: {:list, {:or, [{:tuple, [:atom, :string]}, {:tuple, [:string, :string]}]}}, + default: [], + doc: """ + A list of foreign keys that could raise errors, or an mfa to a function that takes a changeset and returns a list. In the format: `{:key, "name_of_constraint"}` or `{:key, "name_of_constraint", "custom error message"}` + """ + ], + migration_ignore_attributes: [ + type: {:list, :atom}, + default: [], + doc: """ + A list of attributes that will be ignored when generating migrations. + """ + ], + table: [ + type: :string, + doc: """ + The table to store and read the resource from. If this is changed, the migration generator will not remove the old table. + """ + ], + polymorphic?: [ + type: :boolean, + default: false, + doc: """ + Declares this resource as polymorphic. See the [polymorphic resources guide](/documentation/topics/resources/polymorphic-resources.md) for more. + """ + ] + ] + } + + @behaviour Ash.DataLayer + + @sections [@sqlite] + + @moduledoc """ + A sqlite data layer that leverages Ecto's sqlite capabilities. + """ + + use Spark.Dsl.Extension, + sections: @sections, + transformers: [ + AshSqlite.Transformers.ValidateReferences, + AshSqlite.Transformers.VerifyRepo, + AshSqlite.Transformers.EnsureTableOrPolymorphic + ] + + def migrate(args) do + # TODO: take args that we care about + Mix.Task.run("ash_sqlite.migrate", args) + end + + def rollback(args) do + repos = AshSqlite.Mix.Helpers.repos!([], args) + + show_for_repo? = Enum.count_until(repos, 2) == 2 + + for repo <- repos do + for_repo = + if show_for_repo? do + " for repo #{inspect(repo)}" + else + "" + end + + migrations_path = AshSqlite.Mix.Helpers.migrations_path([], repo) + + files = + migrations_path + |> Path.join("**/*.exs") + |> Path.wildcard() + |> Enum.sort() + |> Enum.reverse() + |> Enum.take(20) + |> Enum.map(&String.trim_leading(&1, migrations_path)) + |> Enum.with_index() + |> Enum.map(fn {file, index} -> "#{index + 1}: #{file}" end) + + n = + Mix.shell().prompt( + """ + How many migrations should be rolled back#{for_repo}? 
(default: 0)
+
+          Last 20 migration names, with the input you must provide to
+          rollback up to *and including* that migration:
+
+          #{Enum.join(files, "\n")}
+          Rollback to:
+          """
+          |> String.trim_trailing()
+        )
+        |> String.trim()
+        |> case do
+          "" ->
+            0
+
+          n ->
+            try do
+              String.to_integer(n)
+            rescue
+              _ ->
+                # credo:disable-for-next-line
+                raise "Required an integer value, got: #{n}"
+            end
+        end
+
+      Mix.Task.run("ash_sqlite.rollback", args ++ ["-r", inspect(repo), "-n", to_string(n)])
+      Mix.Task.reenable("ash_sqlite.rollback")
+    end
+  end
+
+  def codegen(args) do
+    # TODO: take args that we care about
+    Mix.Task.run("ash_sqlite.generate_migrations", args)
+  end
+
+  def setup(args) do
+    # TODO: take args that we care about
+    Mix.Task.run("ash_sqlite.create", args)
+    Mix.Task.run("ash_sqlite.migrate", args)
+  end
+
+  def tear_down(args) do
+    # TODO: take args that we care about
+    Mix.Task.run("ash_sqlite.drop", args)
+  end
+
+  import Ecto.Query, only: [from: 2, subquery: 1]
+
+  @impl true
+  def can?(_, :async_engine), do: false
+  def can?(_, :bulk_create), do: true
+  def can?(_, {:lock, _}), do: false
+
+  def can?(_, :transact), do: false
+  def can?(_, :composite_primary_key), do: true
+  def can?(_, {:atomic, :update}), do: true
+  def can?(_, {:atomic, :upsert}), do: true
+  def can?(_, :upsert), do: true
+  def can?(_, :changeset_filter), do: true
+
+  def can?(resource, {:join, other_resource}) do
+    data_layer = Ash.DataLayer.data_layer(resource)
+    other_data_layer = Ash.DataLayer.data_layer(other_resource)
+
+    data_layer == other_data_layer and
+      AshSqlite.DataLayer.Info.repo(resource) == AshSqlite.DataLayer.Info.repo(other_resource)
+  end
+
+  def can?(_resource, {:lateral_join, _}) do
+    false
+  end
+
+  def can?(_, :boolean_filter), do: true
+
+  def can?(_, {:aggregate, _type}), do: false
+
+  def can?(_, :aggregate_filter), do: false
+  def can?(_, :aggregate_sort), do: false
+  def can?(_, :expression_calculation), do: true
+  def can?(_, :expression_calculation_sort), do: true
+  def can?(_, :create), do: true
+  def can?(_, :select), do: true
+  def can?(_, :read), do: true
+
+  def can?(resource, action) when action in ~w[update destroy]a do
+    resource
+    |> Ash.Resource.Info.primary_key()
+    |> Enum.any?()
+  end
+
+  def can?(_, :filter), do: true
+  def can?(_, :limit), do: true
+  def can?(_, :offset), do: true
+  def can?(_, :multitenancy), do: false
+
+  def can?(_, {:filter_relationship, %{manual: {module, _}}}) do
+    Spark.implements_behaviour?(module, AshSqlite.ManualRelationship)
+  end
+
+  def can?(_, {:filter_relationship, _}), do: true
+
+  def can?(_, {:aggregate_relationship, _}), do: false
+
+  def can?(_, :timeout), do: true
+  def can?(_, {:filter_expr, %Ash.Query.Function.StringJoin{}}), do: false
+  def can?(_, {:filter_expr, _}), do: true
+  def can?(_, :nested_expressions), do: true
+  def can?(_, {:query_aggregate, _}), do: true
+  def can?(_, :sort), do: true
+  def can?(_, :distinct_sort), do: false
+  def can?(_, :distinct), do: false
+  def can?(_, {:sort, _}), do: true
+  def can?(_, _), do: false
+
+  @impl true
+  def limit(query, nil, _), do: {:ok, query}
+
+  def limit(query, limit, _resource) do
+    {:ok, from(row in query, limit: ^limit)}
+  end
+
+  @impl true
+  def source(resource) do
+    AshSqlite.DataLayer.Info.table(resource) || ""
+  end
+
+  @impl true
+  def set_context(resource, data_layer_query, context) do
+    start_bindings = context[:data_layer][:start_bindings_at] || 0
+    data_layer_query = from(row in data_layer_query, as: ^start_bindings)
+
+    data_layer_query =
+      if
context[:data_layer][:table] do + %{ + data_layer_query + | from: %{data_layer_query.from | source: {context[:data_layer][:table], resource}} + } + else + data_layer_query + end + + {:ok, + AshSql.Bindings.default_bindings( + data_layer_query, + resource, + AshSqlite.SqlImplementation, + context + )} + end + + @impl true + def offset(query, nil, _), do: query + + def offset(%{offset: old_offset} = query, 0, _resource) when old_offset in [0, nil] do + {:ok, query} + end + + def offset(query, offset, _resource) do + {:ok, from(row in query, offset: ^offset)} + end + + @impl true + def run_aggregate_query(query, aggregates, resource) do + {exists, aggregates} = Enum.split_with(aggregates, &(&1.kind == :exists)) + query = AshSql.Bindings.default_bindings(query, resource, AshSqlite.SqlImplementation) + + query = + if query.limit do + query = + query + |> Ecto.Query.exclude(:select) + |> Ecto.Query.exclude(:order_by) + |> Map.put(:windows, []) + + from(row in subquery(query), as: ^0, select: %{}) + else + query + |> Ecto.Query.exclude(:select) + |> Ecto.Query.exclude(:order_by) + |> Map.put(:windows, []) + |> Ecto.Query.select(%{}) + end + + query_before_select = query + + query = + Enum.reduce( + aggregates, + query, + fn agg, query -> + AshSql.Aggregate.add_subquery_aggregate_select( + query, + agg.relationship_path |> Enum.drop(1), + agg, + resource, + true, + Ash.Resource.Info.relationship(resource, agg.relationship_path |> Enum.at(1)) + ) + end + ) + + result = + case aggregates do + [] -> + %{} + + _ -> + dynamic_repo(resource, query).one(query, repo_opts(nil, nil, resource)) + end + + {:ok, add_exists_aggs(result, resource, query_before_select, exists)} + end + + defp add_exists_aggs(result, resource, query, exists) do + repo = dynamic_repo(resource, query) + repo_opts = repo_opts(nil, nil, resource) + + Enum.reduce(exists, result, fn agg, result -> + {:ok, filtered} = + case agg do + %{query: %{filter: filter}} when not is_nil(filter) -> + filter(query, filter, resource) + + _ -> + {:ok, query} + end + + Map.put( + result || %{}, + agg.name, + repo.exists?(filtered, repo_opts) + ) + end) + end + + @impl true + def run_query(query, resource) do + with_sort_applied = + if query.__ash_bindings__[:sort_applied?] do + {:ok, query} + else + AshSql.Sort.apply_sort(query, query.__ash_bindings__[:sort], resource) + end + + case with_sort_applied do + {:error, error} -> + {:error, error} + + {:ok, query} -> + query = + if query.__ash_bindings__[:__order__?] 
&& query.windows[:order] do
+            order_by = %{query.windows[:order] | expr: query.windows[:order].expr[:order_by]}
+
+            %{
+              query
+              | windows: Keyword.delete(query.windows, :order),
+                order_bys: [order_by]
+            }
+          else
+            %{query | windows: Keyword.delete(query.windows, :order)}
+          end
+
+        if AshSqlite.DataLayer.Info.polymorphic?(resource) && no_table?(query) do
+          raise_table_error!(resource, :read)
+        else
+          primary_key = Ash.Resource.Info.primary_key(resource)
+
+          {:ok,
+           dynamic_repo(resource, query).all(query, repo_opts(nil, nil, resource))
+           |> Enum.uniq_by(&Map.take(&1, primary_key))}
+        end
+    end
+  rescue
+    e ->
+      handle_raised_error(e, __STACKTRACE__, query, resource)
+  end
+
+  defp no_table?(%{from: %{source: {"", _}}}), do: true
+  defp no_table?(_), do: false
+
+  # sqlite does not support multitenancy, so the tenant is ignored here
+  defp repo_opts(timeout, _tenant, _resource) do
+    []
+    |> add_timeout(timeout)
+  end
+
+  defp repo_opts(timeout, _resource) do
+    add_timeout([], timeout)
+  end
+
+  defp add_timeout(opts, timeout) when not is_nil(timeout) do
+    Keyword.put(opts, :timeout, timeout)
+  end
+
+  defp add_timeout(opts, _), do: opts
+
+  @impl true
+  def functions(_resource) do
+    [
+      AshSqlite.Functions.Like,
+      AshSqlite.Functions.ILike
+    ]
+  end
+
+  @impl true
+  def resource_to_query(resource, _) do
+    from(row in {AshSqlite.DataLayer.Info.table(resource) || "", resource}, [])
+  end
+
+  @impl true
+  def bulk_create(resource, stream, options) do
+    changesets = Enum.to_list(stream)
+
+    repo = dynamic_repo(resource, Enum.at(changesets, 0))
+
+    opts = repo_opts(nil, options[:tenant], resource)
+
+    source = resolve_source(resource, Enum.at(changesets, 0))
+
+    try do
+      opts =
+        if options[:upsert?] do
+          raise "The SQLite data layer does not yet support upserts in bulk_create"
+          ## Ash groups changesets by atomics before dispatching them to the data layer,
+          ## which means that all changesets here have the same atomics
+          # %{atomics: atomics, filter: filter} = Enum.at(changesets, 0)
+
+          # query = from(row in resource, as: ^0)
+
+          # query =
+          #   query
+          #   |> AshSql.Bindings.default_bindings(resource, AshSqlite.SqlImplementation)
+
+          # upsert_set =
+          #   upsert_set(resource, changesets, options)
+
+          # on_conflict =
+          #   case AshSql.Atomics.query_with_atomics(
+          #          resource,
+          #          query,
+          #          filter,
+          #          atomics,
+          #          %{},
+          #          upsert_set
+          #        ) do
+          #     :empty ->
+          #       :nothing
+
+          #     {:ok, query} ->
+          #       query
+
+          #     {:error, error} ->
+          #       raise Ash.Error.to_ash_error(error)
+          #   end
+
+          # opts
+          # |> Keyword.put(:on_conflict, on_conflict)
+          # |> Keyword.put(
+          #   :conflict_target,
+          #   conflict_target(
+          #     resource,
+          #     options[:upsert_keys] || Ash.Resource.Info.primary_key(resource)
+          #   )
+          # )
+        else
+          opts
+        end
+
+      ecto_changesets = Enum.map(changesets, & &1.attributes)
+
+      opts =
+        if schema = Enum.at(changesets, 0).context[:data_layer][:schema] do
+          Keyword.put(opts, :prefix, schema)
+        else
+          opts
+        end
+
+      resource_for_returning = if options.return_records?, do: resource, else: nil
+
+      result = insert_all_returning(source, ecto_changesets, repo, resource_for_returning, opts)
+
+      case result do
+        {_, nil} ->
+          :ok
+
+        {_, results} ->
+          if options[:single?] do
+            {:ok, results}
+          else
+            {:ok,
+             Stream.zip_with(results, changesets, fn result, changeset ->
+               Ash.Resource.put_metadata(
+                 result,
+                 :bulk_create_index,
+                 changeset.context.bulk_create.index
+               )
+             end)}
+          end
+      end
+    rescue
+      e ->
+        changeset = Ash.Changeset.new(resource)
+
+        handle_raised_error(
+          e,
+          __STACKTRACE__,
+          {:bulk_create, ecto_changeset(changeset.data, changeset, :create, false)},
+          resource
+        )
+    end
+  end
+
+  defp insert_all_returning(source, entries, repo, nil, opts) do
+    repo.insert_all(source, entries, opts)
+  end
+
+  defp insert_all_returning(source, entries, repo, resource, opts) do
+    {count, nil} = repo.insert_all(source, entries, opts)
+    reload_key = Ash.Resource.Info.primary_key(resource) |> Enum.at(0)
+    keys_to_reload = entries |> Enum.map(&Map.get(&1, reload_key))
+
+    # reload by the resource's actual primary key rather than assuming an `id` column
+    unordered =
+      Ecto.Query.from(s in source, where: field(s, ^reload_key) in ^keys_to_reload)
+      |> repo.all()
+
+    indexed = unordered |> Enum.group_by(&Map.get(&1, reload_key))
+
+    ordered =
+      keys_to_reload
+      |> Enum.map(&(Map.get(indexed, &1) |> Enum.at(0)))
+
+    {count, ordered}
+  end
+
+  defp upsert_set(resource, changesets, options) do
+    attributes_changing_anywhere =
+      changesets |> Enum.flat_map(&Map.keys(&1.attributes)) |> Enum.uniq()
+
+    update_defaults = update_defaults(resource)
+    # We can't reference EXCLUDED if at least one of the changesets in the stream is not
+    # changing the value (and we wouldn't want to even if we could, as it would be unnecessary)
+
+    upsert_fields =
+      (options[:upsert_fields] || []) |> Enum.filter(&(&1 in attributes_changing_anywhere))
+
+    fields_to_upsert =
+      (upsert_fields ++ Keyword.keys(update_defaults)) --
+        Keyword.keys(Enum.at(changesets, 0).atomics)
+
+    Enum.map(fields_to_upsert, fn upsert_field ->
+      # for safety, we check once more at the end that all values in
+      # upsert_fields are names of attributes. This is because
+      # below we use `literal/1` to bring them into the query
+      if is_nil(resource.__schema__(:type, upsert_field)) do
+        raise "Only attribute names can be used in upsert_fields"
+      end
+
+      case Keyword.fetch(update_defaults, upsert_field) do
+        {:ok, default} ->
+          if upsert_field in upsert_fields do
+            {upsert_field,
+             Ecto.Query.dynamic(
+               [],
+               fragment(
+                 "COALESCE(?, ?)",
+                 literal(^to_string(upsert_field)),
+                 ^default
+               )
+             )}
+          else
+            {upsert_field, default}
+          end
+
+        :error ->
+          {upsert_field,
+           Ecto.Query.dynamic(
+             [],
+             fragment("?", literal(^to_string(upsert_field)))
+           )}
+      end
+    end)
+  end
+
+  @impl true
+  def create(resource, changeset) do
+    changeset = %{
+      changeset
+      | data:
+          Map.update!(
+            changeset.data,
+            :__meta__,
+            &Map.put(&1, :source, table(resource, changeset))
+          )
+    }
+
+    case bulk_create(resource, [changeset], %{
+           single?: true,
+           tenant: changeset.tenant,
+           return_records?: true
+         }) do
+      {:ok, [result]} ->
+        {:ok, result}
+
+      {:error, error} ->
+        {:error, error}
+    end
+  end
+
+  defp handle_errors({:error, %Ecto.Changeset{errors: errors}}) do
+    {:error, Enum.map(errors, &to_ash_error/1)}
+  end
+
+  defp to_ash_error({field, {message, vars}}) do
+    Ash.Error.Changes.InvalidAttribute.exception(
+      field: field,
+      message: message,
+      private_vars: vars
+    )
+  end
+
+  defp ecto_changeset(record, changeset, type, table_error? \\ true) do
+    filters =
+      if changeset.action_type == :create do
+        %{}
+      else
+        Map.get(changeset, :filters, %{})
+      end
+
+    filters =
+      if changeset.action_type == :create do
+        filters
+      else
+        changeset.resource
+        |> Ash.Resource.Info.primary_key()
+        |> Enum.reduce(filters, fn key, filters ->
+          Map.put(filters, key, Map.get(record, key))
+        end)
+      end
+
+    attributes =
+      changeset.resource
+      |> Ash.Resource.Info.attributes()
+      |> Enum.map(& &1.name)
+
+    attributes_to_change =
+      Enum.reject(attributes, fn attribute ->
+        Keyword.has_key?(changeset.atomics, attribute)
+      end)
+
+    ecto_changeset =
+      record
+      |> to_ecto()
+      |> set_table(changeset, type, table_error?)
+      |> Ecto.Changeset.change(Map.take(changeset.attributes, attributes_to_change))
+      |> Map.update!(:filters, &Map.merge(&1, filters))
+      |> add_configured_foreign_key_constraints(record.__struct__)
+      |> add_unique_indexes(record.__struct__, changeset)
+      |> add_exclusion_constraints(record.__struct__)
+
+    case type do
+      :create ->
+        ecto_changeset
+        |> add_my_foreign_key_constraints(record.__struct__)
+
+      type when type in [:upsert, :update] ->
+        ecto_changeset
+        |> add_my_foreign_key_constraints(record.__struct__)
+        |> add_related_foreign_key_constraints(record.__struct__)
+
+      :delete ->
+        ecto_changeset
+        |> add_related_foreign_key_constraints(record.__struct__)
+    end
+  end
+
+  defp handle_raised_error(
+         %Ecto.StaleEntryError{changeset: %{data: %resource{}, filters: filters}},
+         stacktrace,
+         context,
+         resource
+       ) do
+    handle_raised_error(
+      Ash.Error.Changes.StaleRecord.exception(resource: resource, filters: filters),
+      stacktrace,
+      context,
+      resource
+    )
+  end
+
+  defp handle_raised_error(%Ecto.Query.CastError{} = e, stacktrace, context, resource) do
+    handle_raised_error(
+      Ash.Error.Query.InvalidFilterValue.exception(value: e.value, context: context),
+      stacktrace,
+      context,
+      resource
+    )
+  end
+
+  defp handle_raised_error(
+         %MyXQL.Error{
+           mysql: %{
+             code: 1452,
+             name: :ER_NO_REFERENCED_ROW_2
+           }
+         },
+         stacktrace,
+         context,
+         resource
+       ) do
+    handle_raised_error(
+      Ash.Error.Changes.InvalidChanges.exception(
+        fields: Ash.Resource.Info.primary_key(resource),
+        message: "referenced something that does not exist"
+      ),
+      stacktrace,
+      context,
+      resource
+    )
+  end
+
+  defp handle_raised_error(
+         %MyXQL.Error{
+           mysql: %{code: 1062, name: :ER_DUP_ENTRY}
+         },
+         _stacktrace,
+         _context,
+         resource
+       ) do
+    # The duplicate-entry error does not reliably tell us which fields were
+    # involved, so fall back to reporting the conflict on the primary key.
+    names = Ash.Resource.Info.primary_key(resource)
+
+    message = find_constraint_message(resource, names)
+
+    {:error,
+     names
+     |> Enum.map(fn name ->
+       Ash.Error.Changes.InvalidAttribute.exception(
+         field: name,
+         message: message
+       )
+     end)}
+  end
+
+  defp handle_raised_error(error, stacktrace, _ecto_changeset, _resource) do
+    {:error, Ash.Error.to_ash_error(error, stacktrace)}
+  end
+
+  defp find_constraint_message(resource, names) do
+    find_custom_index_message(resource, names) || find_identity_message(resource, names) ||
+      "has already been taken"
+  end
+
+  defp find_custom_index_message(resource, names) do
+    resource
+    |> AshSqlite.DataLayer.Info.custom_indexes()
+    |> Enum.find(fn %{fields: fields} ->
+      fields |> Enum.map(&to_string/1) |> Enum.sort() ==
+        names |> Enum.map(&to_string/1) |> Enum.sort()
+    end)
+    |> case do
+      %{message:
message} when is_binary(message) -> message + _ -> nil + end + end + + defp find_identity_message(resource, names) do + resource + |> Ash.Resource.Info.identities() + |> Enum.find(fn %{keys: fields} -> + fields |> Enum.map(&to_string/1) |> Enum.sort() == + names |> Enum.map(&to_string/1) |> Enum.sort() + end) + |> case do + %{message: message} when is_binary(message) -> + message + + _ -> + nil + end + end + + defp set_table(record, changeset, operation, table_error?) do + if AshSqlite.DataLayer.Info.polymorphic?(record.__struct__) do + table = + changeset.context[:data_layer][:table] || + AshSqlite.DataLayer.Info.table(record.__struct__) + + if table do + Ecto.put_meta(record, source: table) + else + if table_error? do + raise_table_error!(changeset.resource, operation) + else + record + end + end + else + record + end + end + + def from_ecto({:ok, result}), do: {:ok, from_ecto(result)} + def from_ecto({:error, _} = other), do: other + + def from_ecto(nil), do: nil + + def from_ecto(value) when is_list(value) do + Enum.map(value, &from_ecto/1) + end + + def from_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + empty = struct(resource) + + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + case Map.get(record, relationship.name) do + %Ecto.Association.NotLoaded{} -> + Map.put(record, relationship.name, Map.get(empty, relationship.name)) + + value -> + Map.put(record, relationship.name, from_ecto(value)) + end + end) + else + record + end + end + + def from_ecto(other), do: other + + def to_ecto(nil), do: nil + + def to_ecto(value) when is_list(value) do + Enum.map(value, &to_ecto/1) + end + + def to_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + value = + case Map.get(record, relationship.name) do + %Ash.NotLoaded{} -> + %Ecto.Association.NotLoaded{ + __field__: relationship.name, + __cardinality__: relationship.cardinality + } + + value -> + to_ecto(value) + end + + Map.put(record, relationship.name, value) + end) + else + record + end + end + + def to_ecto(other), do: other + + defp add_exclusion_constraints(changeset, resource) do + resource + |> AshSqlite.DataLayer.Info.exclusion_constraint_names() + |> Enum.reduce(changeset, fn constraint, changeset -> + case constraint do + {key, name} -> + Ecto.Changeset.exclusion_constraint(changeset, key, name: name) + + {key, name, message} -> + Ecto.Changeset.exclusion_constraint(changeset, key, name: name, message: message) + end + end) + end + + defp add_related_foreign_key_constraints(changeset, resource) do + # TODO: this doesn't guarantee us to get all of them, because if something is related to this + # schema and there is no back-relation, then this won't catch it's foreign key constraints + resource + |> Ash.Resource.Info.relationships() + |> Enum.map(& &1.destination) + |> Enum.uniq() + |> Enum.flat_map(fn related -> + related + |> Ash.Resource.Info.relationships() + |> Enum.filter(&(&1.destination == resource)) + |> Enum.map(&Map.take(&1, [:source, :source_attribute, :destination_attribute, :name])) + end) + |> Enum.reduce(changeset, fn %{ + source: source, + source_attribute: source_attribute, + destination_attribute: destination_attribute, + name: relationship_name + }, + changeset -> + case AshSqlite.DataLayer.Info.reference(resource, relationship_name) do + %{name: name} when not is_nil(name) -> + 
Ecto.Changeset.foreign_key_constraint(changeset, destination_attribute,
+            name: name,
+            message: "would leave records behind"
+          )
+
+        _ ->
+          Ecto.Changeset.foreign_key_constraint(changeset, destination_attribute,
+            name: "#{AshSqlite.DataLayer.Info.table(source)}_#{source_attribute}_fkey",
+            message: "would leave records behind"
+          )
+      end
+    end)
+  end
+
+  defp add_my_foreign_key_constraints(changeset, resource) do
+    resource
+    |> Ash.Resource.Info.relationships()
+    |> Enum.reduce(changeset, &Ecto.Changeset.foreign_key_constraint(&2, &1.source_attribute))
+  end
+
+  defp add_configured_foreign_key_constraints(changeset, resource) do
+    resource
+    |> AshSqlite.DataLayer.Info.foreign_key_names()
+    |> case do
+      {m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
+      value -> List.wrap(value)
+    end
+    |> Enum.reduce(changeset, fn
+      {key, name}, changeset ->
+        Ecto.Changeset.foreign_key_constraint(changeset, key, name: name)
+
+      {key, name, message}, changeset ->
+        Ecto.Changeset.foreign_key_constraint(changeset, key, name: name, message: message)
+    end)
+  end
+
+  defp add_unique_indexes(changeset, resource, ash_changeset) do
+    changeset =
+      resource
+      |> Ash.Resource.Info.identities()
+      |> Enum.reduce(changeset, fn identity, changeset ->
+        name =
+          AshSqlite.DataLayer.Info.identity_index_names(resource)[identity.name] ||
+            "#{table(resource, ash_changeset)}_#{identity.name}_index"
+
+        opts =
+          if Map.get(identity, :message) do
+            [name: name, message: identity.message]
+          else
+            [name: name]
+          end
+
+        Ecto.Changeset.unique_constraint(changeset, identity.keys, opts)
+      end)
+
+    changeset =
+      resource
+      |> AshSqlite.DataLayer.Info.custom_indexes()
+      |> Enum.reduce(changeset, fn index, changeset ->
+        opts =
+          if index.message do
+            [name: index.name, message: index.message]
+          else
+            [name: index.name]
+          end
+
+        Ecto.Changeset.unique_constraint(changeset, index.fields, opts)
+      end)
+
+    names =
+      resource
+      |> AshSqlite.DataLayer.Info.unique_index_names()
+      |> case do
+        {m, f, a} -> List.wrap(apply(m, f, [changeset | a]))
+        value -> List.wrap(value)
+      end
+
+    names =
+      case Ash.Resource.Info.primary_key(resource) do
+        [] ->
+          names
+
+        fields ->
+          if table = table(resource, ash_changeset) do
+            [{fields, table <> "_pkey"} | names]
+          else
+            names
+          end
+      end
+
+    Enum.reduce(names, changeset, fn
+      {keys, name}, changeset ->
+        Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name)
+
+      {keys, name, message}, changeset ->
+        Ecto.Changeset.unique_constraint(changeset, List.wrap(keys), name: name, message: message)
+    end)
+  end
+
+  @impl true
+  def upsert(resource, changeset, keys \\ nil) do
+    keys = keys || Ash.Resource.Info.primary_key(resource)
+
+    explicitly_changing_attributes =
+      Map.keys(changeset.attributes) -- Map.get(changeset, :defaults, []) -- keys
+
+    upsert_fields =
+      changeset.context[:private][:upsert_fields] || explicitly_changing_attributes
+
+    case bulk_create(resource, [changeset], %{
+           single?: true,
+           upsert?: true,
+           tenant: changeset.tenant,
+           upsert_keys: keys,
+           upsert_fields: upsert_fields,
+           return_records?: true
+         }) do
+      {:ok, [result]} ->
+        {:ok, result}
+
+      {:error, error} ->
+        {:error, error}
+    end
+  end
+
+  defp conflict_target(resource, keys) do
+    if Ash.Resource.Info.base_filter(resource) do
+      base_filter_sql =
+        AshSqlite.DataLayer.Info.base_filter_sql(resource) ||
+          raise """
+          Cannot use upserts with resources that have a base_filter without also adding `base_filter_sql` in the sqlite section.
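+
+          For example (`status` here is a hypothetical attribute):
+
+            sqlite do
+              base_filter_sql "status = 'active'"
+            end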
+ """ + + sources = + Enum.map(keys, fn key -> + ~s("#{Ash.Resource.Info.attribute(resource, key).source || key}") + end) + + {:unsafe_fragment, "(" <> Enum.join(sources, ", ") <> ") WHERE (#{base_filter_sql})"} + else + keys + end + end + + defp update_defaults(resource) do + attributes = + resource + |> Ash.Resource.Info.attributes() + |> Enum.reject(&is_nil(&1.update_default)) + + attributes + |> static_defaults() + |> Enum.concat(lazy_matching_defaults(attributes)) + |> Enum.concat(lazy_non_matching_defaults(attributes)) + end + + defp static_defaults(attributes) do + attributes + |> Enum.reject(&get_default_fun(&1)) + |> Enum.map(&{&1.name, &1.update_default}) + end + + defp lazy_non_matching_defaults(attributes) do + attributes + |> Enum.filter(&(!&1.match_other_defaults? && get_default_fun(&1))) + |> Enum.map(fn attribute -> + default_value = + case attribute.update_default do + function when is_function(function) -> + function.() + + {m, f, a} when is_atom(m) and is_atom(f) and is_list(a) -> + apply(m, f, a) + end + + {attribute.name, default_value} + end) + end + + defp lazy_matching_defaults(attributes) do + attributes + |> Enum.filter(&(&1.match_other_defaults? && get_default_fun(&1))) + |> Enum.group_by(& &1.update_default) + |> Enum.flat_map(fn {default_fun, attributes} -> + default_value = + case default_fun do + function when is_function(function) -> + function.() + + {m, f, a} when is_atom(m) and is_atom(f) and is_list(a) -> + apply(m, f, a) + end + + Enum.map(attributes, &{&1.name, default_value}) + end) + end + + defp get_default_fun(attribute) do + if is_function(attribute.update_default) or match?({_, _, _}, attribute.update_default) do + attribute.update_default + end + end + + @impl true + def update(resource, changeset) do + ecto_changeset = + changeset.data + |> Map.update!(:__meta__, &Map.put(&1, :source, table(resource, changeset))) + |> ecto_changeset(changeset, :update) + + try do + query = from(row in resource, as: ^0) + + select = Keyword.keys(changeset.atomics) ++ Ash.Resource.Info.primary_key(resource) + + query = + query + |> AshSql.Bindings.default_bindings( + resource, + AshSqlite.SqlImplementation, + changeset.context + ) + #|> Ecto.Query.select(^select) + |> pkey_filter(changeset.data) + + case AshSql.Atomics.query_with_atomics( + resource, + query, + changeset.filter, + changeset.atomics, + ecto_changeset.changes, + [] + ) do + :empty -> + {:ok, changeset.data} + + {:ok, query} -> + repo_opts = repo_opts(changeset.timeout, changeset.tenant, changeset.resource) + + #repo_opts = + # Keyword.put(repo_opts, :returning, Keyword.keys(changeset.atomics)) + + repo = dynamic_repo(resource, changeset) + {count, nil} = + repo.update_all(query, [], repo_opts) + + result = from(row in resource, as: ^0, select: ^select) + |> pkey_filter(changeset.data) + |> repo.all() + + case {count, result} do + {0, []} -> + {:error, + Ash.Error.Changes.StaleRecord.exception( + resource: resource, + filters: changeset.filter + )} + + {1, [result]} -> + record = + changeset.data + |> Map.merge(changeset.attributes) + |> Map.merge(Map.take(result, Keyword.keys(changeset.atomics))) + + {:ok, record} + end + + {:error, error} -> + {:error, error} + end + rescue + e -> + handle_raised_error(e, __STACKTRACE__, ecto_changeset, resource) + end + end + + defp pkey_filter(query, %resource{} = record) do + pkey = + record + |> Map.take(Ash.Resource.Info.primary_key(resource)) + |> Map.to_list() + + Ecto.Query.where(query, ^pkey) + end + + @impl true + def destroy(resource, %{data: record} = 
changeset) do + ecto_changeset = ecto_changeset(record, changeset, :delete) + + try do + ecto_changeset + |> dynamic_repo(resource, changeset).delete( + repo_opts(changeset.timeout, changeset.resource) + ) + |> from_ecto() + |> case do + {:ok, _record} -> + :ok + + {:error, error} -> + handle_errors({:error, error}) + end + rescue + e -> + handle_raised_error(e, __STACKTRACE__, ecto_changeset, resource) + end + end + + @impl true + def sort(query, sort, _resource) do + {:ok, Map.update!(query, :__ash_bindings__, &Map.put(&1, :sort, sort))} + end + + @impl true + def select(query, select, resource) do + query = AshSql.Bindings.default_bindings(query, resource, AshSqlite.SqlImplementation) + + {:ok, + from(row in query, + select: struct(row, ^Enum.uniq(select)) + )} + end + + @doc false + def unwrap_one([thing]), do: thing + def unwrap_one([]), do: nil + def unwrap_one(other), do: other + + @impl true + def filter(query, filter, _resource, opts \\ []) do + query + |> AshSql.Join.join_all_relationships(filter, opts) + |> case do + {:ok, query} -> + {:ok, AshSql.Filter.add_filter_expression(query, filter)} + + {:error, error} -> + {:error, error} + end + end + + @impl true + def add_calculations(query, calculations, resource) do + AshSql.Calculation.add_calculations(query, calculations, resource, 0, true) + end + + @doc false + def get_binding(resource, path, query, type, name_match \\ nil) + + def get_binding(resource, path, %{__ash_bindings__: _} = query, type, name_match) do + types = List.wrap(type) + + Enum.find_value(query.__ash_bindings__.bindings, fn + {binding, %{path: candidate_path, type: binding_type} = data} -> + if binding_type in types do + if name_match do + if data[:name] == name_match do + if Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) do + binding + end + end + else + if Ash.SatSolver.synonymous_relationship_paths?(resource, candidate_path, path) do + binding + else + false + end + end + end + + _ -> + nil + end) + end + + def get_binding(_, _, _, _, _), do: nil + + @doc false + def add_binding(query, data, additional_bindings \\ 0) do + current = query.__ash_bindings__.current + bindings = query.__ash_bindings__.bindings + + new_ash_bindings = %{ + query.__ash_bindings__ + | bindings: Map.put(bindings, current, data), + current: current + 1 + additional_bindings + } + + %{query | __ash_bindings__: new_ash_bindings} + end + + def add_known_binding(query, data, known_binding) do + bindings = query.__ash_bindings__.bindings + + new_ash_bindings = %{ + query.__ash_bindings__ + | bindings: Map.put(bindings, known_binding, data) + } + + %{query | __ash_bindings__: new_ash_bindings} + end + + @impl true + def rollback(resource, term) do + AshSqlite.DataLayer.Info.repo(resource).rollback(term) + end + + defp table(resource, changeset) do + changeset.context[:data_layer][:table] || AshSqlite.DataLayer.Info.table(resource) + end + + defp raise_table_error!(resource, operation) do + if AshSqlite.DataLayer.Info.polymorphic?(resource) do + raise """ + Could not determine table for #{operation} on #{inspect(resource)}. + + Polymorphic resources require that the `data_layer[:table]` context is provided. + See the guide on polymorphic resources for more information. + """ + else + raise """ + Could not determine table for #{operation} on #{inspect(resource)}. 
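+
+      Define one in the resource's `sqlite` block, for example:
+
+        sqlite do
+          table "my_table"
+        end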
+ """ + end + end + + defp dynamic_repo(resource, %{__ash_bindings__: %{context: %{data_layer: %{repo: repo}}}}) do + repo || AshSqlite.DataLayer.Info.repo(resource) + end + + defp dynamic_repo(resource, %{context: %{data_layer: %{repo: repo}}}) do + repo || AshSqlite.DataLayer.Info.repo(resource) + end + + defp dynamic_repo(resource, _) do + AshSqlite.DataLayer.Info.repo(resource) + end + + defp resolve_source(resource, changeset) do + if table = changeset.context[:data_layer][:table] do + {table, resource} + else + resource + end + end +end diff --git a/lib/data_layer/info.ex b/lib/data_layer/info.ex new file mode 100644 index 0000000..abb5193 --- /dev/null +++ b/lib/data_layer/info.ex @@ -0,0 +1,117 @@ +defmodule AshSqlite.DataLayer.Info do + @moduledoc "Introspection functions for " + + alias Spark.Dsl.Extension + + @doc "The configured repo for a resource" + def repo(resource) do + Extension.get_opt(resource, [:sqlite], :repo, nil, true) + end + + @doc "The configured table for a resource" + def table(resource) do + Extension.get_opt(resource, [:sqlite], :table, nil, true) + end + + @doc "The configured references for a resource" + def references(resource) do + Extension.get_entities(resource, [:sqlite, :references]) + end + + @doc "The configured reference for a given relationship of a resource" + def reference(resource, relationship) do + resource + |> Extension.get_entities([:sqlite, :references]) + |> Enum.find(&(&1.relationship == relationship)) + end + + @doc "A keyword list of customized migration types" + def migration_types(resource) do + Extension.get_opt(resource, [:sqlite], :migration_types, []) + end + + @doc "A keyword list of customized migration defaults" + def migration_defaults(resource) do + Extension.get_opt(resource, [:sqlite], :migration_defaults, []) + end + + @doc "A list of attributes to be ignored when generating migrations" + def migration_ignore_attributes(resource) do + Extension.get_opt(resource, [:sqlite], :migration_ignore_attributes, []) + end + + @doc "The configured custom_indexes for a resource" + def custom_indexes(resource) do + Extension.get_entities(resource, [:sqlite, :custom_indexes]) + end + + @doc "The configured custom_statements for a resource" + def custom_statements(resource) do + Extension.get_entities(resource, [:sqlite, :custom_statements]) + end + + @doc "The configured polymorphic_reference_on_delete for a resource" + def polymorphic_on_delete(resource) do + Extension.get_opt(resource, [:sqlite, :references], :polymorphic_on_delete, nil, true) + end + + @doc "The configured polymorphic_reference_on_update for a resource" + def polymorphic_on_update(resource) do + Extension.get_opt(resource, [:sqlite, :references], :polymorphic_on_update, nil, true) + end + + @doc "The configured polymorphic_reference_name for a resource" + def polymorphic_name(resource) do + Extension.get_opt(resource, [:sqlite, :references], :polymorphic_on_delete, nil, true) + end + + @doc "The configured polymorphic? 
for a resource" + def polymorphic?(resource) do + Extension.get_opt(resource, [:sqlite], :polymorphic?, nil, true) + end + + @doc "The configured unique_index_names" + def unique_index_names(resource) do + Extension.get_opt(resource, [:sqlite], :unique_index_names, [], true) + end + + @doc "The configured exclusion_constraint_names" + def exclusion_constraint_names(resource) do + Extension.get_opt(resource, [:sqlite], :exclusion_constraint_names, [], true) + end + + @doc "The configured identity_index_names" + def identity_index_names(resource) do + Extension.get_opt(resource, [:sqlite], :identity_index_names, [], true) + end + + @doc "Identities not to include in the migrations" + def skip_identities(resource) do + Extension.get_opt(resource, [:sqlite], :skip_identities, [], true) + end + + @doc "The configured foreign_key_names" + def foreign_key_names(resource) do + Extension.get_opt(resource, [:sqlite], :foreign_key_names, [], true) + end + + @doc "Whether or not the resource should be included when generating migrations" + def migrate?(resource) do + Extension.get_opt(resource, [:sqlite], :migrate?, nil, true) + end + + @doc "A list of keys to always include in upserts." + def global_upsert_keys(resource) do + Extension.get_opt(resource, [:sqlite], :global_upsert_keys, []) + end + + @doc "A stringified version of the base_filter, to be used in a where clause when generating unique indexes" + def base_filter_sql(resource) do + Extension.get_opt(resource, [:sqlite], :base_filter_sql, nil) + end + + @doc "Skip generating unique indexes when generating migrations" + def skip_unique_indexes(resource) do + Extension.get_opt(resource, [:sqlite], :skip_unique_indexes, []) + end +end diff --git a/lib/functions/ilike.ex b/lib/functions/ilike.ex new file mode 100644 index 0000000..be9896f --- /dev/null +++ b/lib/functions/ilike.ex @@ -0,0 +1,9 @@ +defmodule AshSqlite.Functions.ILike do + @moduledoc """ + Maps to the builtin sqlite function `ilike`. + """ + + use Ash.Query.Function, name: :ilike + + def args, do: [[:string, :string]] +end diff --git a/lib/functions/like.ex b/lib/functions/like.ex new file mode 100644 index 0000000..442b807 --- /dev/null +++ b/lib/functions/like.ex @@ -0,0 +1,9 @@ +defmodule AshSqlite.Functions.Like do + @moduledoc """ + Maps to the builtin sqlite function `like`. 
+ """ + + use Ash.Query.Function, name: :like + + def args, do: [[:string, :string]] +end diff --git a/lib/manual_relationship.ex b/lib/manual_relationship.ex new file mode 100644 index 0000000..8d26497 --- /dev/null +++ b/lib/manual_relationship.ex @@ -0,0 +1,25 @@ +defmodule AshSqlite.ManualRelationship do + @moduledoc "A behavior for sqlite-specific manual relationship functionality" + + @callback ash_sqlite_join( + source_query :: Ecto.Query.t(), + opts :: Keyword.t(), + current_binding :: term, + destination_binding :: term, + type :: :inner | :left, + destination_query :: Ecto.Query.t() + ) :: {:ok, Ecto.Query.t()} | {:error, term} + + @callback ash_sqlite_subquery( + opts :: Keyword.t(), + current_binding :: term, + destination_binding :: term, + destination_query :: Ecto.Query.t() + ) :: {:ok, Ecto.Query.t()} | {:error, term} + + defmacro __using__(_) do + quote do + @behaviour AshSqlite.ManualRelationship + end + end +end diff --git a/lib/migration_generator/migration_generator.ex b/lib/migration_generator/migration_generator.ex new file mode 100644 index 0000000..8179048 --- /dev/null +++ b/lib/migration_generator/migration_generator.ex @@ -0,0 +1,2542 @@ +defmodule AshSqlite.MigrationGenerator do + @moduledoc false + + require Logger + + import Mix.Generator + + alias AshSqlite.MigrationGenerator.{Operation, Phase} + + defstruct snapshot_path: nil, + migration_path: nil, + name: nil, + quiet: false, + current_snapshots: nil, + answers: [], + no_shell?: false, + format: true, + dry_run: false, + check: false, + drop_columns: false + + def generate(domains, opts \\ []) do + domains = List.wrap(domains) + opts = opts(opts) + + all_resources = Enum.uniq(Enum.flat_map(domains, &Ash.Domain.Info.resources/1)) + + snapshots = + all_resources + |> Enum.filter(fn resource -> + Ash.DataLayer.data_layer(resource) == AshSqlite.DataLayer && + AshSqlite.DataLayer.Info.migrate?(resource) + end) + |> Enum.flat_map(&get_snapshots(&1, all_resources)) + + repos = + snapshots + |> Enum.map(& &1.repo) + |> Enum.uniq() + + Mix.shell().info("\nExtension Migrations: ") + create_extension_migrations(repos, opts) + Mix.shell().info("\nGenerating Migrations:") + create_migrations(snapshots, opts) + end + + @doc """ + A work in progress utility for getting snapshots. + + Does not support everything supported by the migration generator. + """ + def take_snapshots(domain, repo, only_resources \\ nil) do + all_resources = domain |> Ash.Domain.Info.resources() |> Enum.uniq() + + all_resources + |> Enum.filter(fn resource -> + Ash.DataLayer.data_layer(resource) == AshSqlite.DataLayer && + AshSqlite.DataLayer.Info.repo(resource) == repo && + (is_nil(only_resources) || resource in only_resources) + end) + |> Enum.flat_map(&get_snapshots(&1, all_resources)) + end + + @doc """ + A work in progress utility for getting operations between snapshots. + + Does not support everything supported by the migration generator. 
+ """ + def get_operations_from_snapshots(old_snapshots, new_snapshots, opts \\ []) do + opts = %{opts(opts) | no_shell?: true} + + old_snapshots = + old_snapshots + |> Enum.map(&sanitize_snapshot/1) + + new_snapshots + |> deduplicate_snapshots(opts, old_snapshots) + |> fetch_operations(opts) + |> Enum.flat_map(&elem(&1, 1)) + |> Enum.uniq() + |> organize_operations() + end + + defp add_references_primary_key(snapshot, snapshots) do + %{ + snapshot + | attributes: + snapshot.attributes + |> Enum.map(fn + %{references: references} = attribute when not is_nil(references) -> + if is_nil(Map.get(references, :primary_key?)) do + %{ + attribute + | references: + Map.put( + references, + :primary_key?, + find_references_primary_key( + references, + snapshots + ) + ) + } + else + attribute + end + + attribute -> + attribute + end) + } + end + + defp find_references_primary_key(references, snapshots) do + Enum.find_value(snapshots, false, fn snapshot -> + if snapshot && references && snapshot.table == references.table do + Enum.any?(snapshot.attributes, fn attribute -> + attribute.source == references.destination_attribute && attribute.primary_key? + end) + end + end) + end + + defp opts(opts) do + case struct(__MODULE__, opts) do + %{check: true} = opts -> + %{opts | dry_run: true} + + opts -> + opts + end + end + + defp snapshot_path(%{snapshot_path: snapshot_path}, _) when not is_nil(snapshot_path), + do: snapshot_path + + defp snapshot_path(_config, repo) do + # Copied from ecto's mix task, thanks Ecto ❤️ + config = repo.config() + + app = Keyword.fetch!(config, :otp_app) + Path.join([Mix.Project.deps_paths()[app] || File.cwd!(), "priv", "resource_snapshots"]) + end + + defp create_extension_migrations(repos, opts) do + for repo <- repos do + snapshot_path = snapshot_path(opts, repo) + snapshot_file = Path.join(snapshot_path, "extensions.json") + + installed_extensions = + if File.exists?(snapshot_file) do + snapshot_file + |> File.read!() + |> Jason.decode!(keys: :atoms!) 
+ else + [] + end + + {_extensions_snapshot, installed_extensions} = + case installed_extensions do + installed when is_list(installed) -> + {%{ + installed: installed + }, installed} + + other -> + {other, other.installed} + end + + requesteds = + repo.installed_extensions() + |> Enum.map(fn + extension_module when is_atom(extension_module) -> + {ext_name, version, _up_fn, _down_fn} = extension = extension_module.extension() + + {"#{ext_name}_v#{version}", extension} + + extension_name -> + {extension_name, extension_name} + end) + + to_install = + requesteds + |> Enum.filter(fn {name, _extension} -> !Enum.member?(installed_extensions, name) end) + |> Enum.map(fn {_name, extension} -> extension end) + + if Enum.empty?(to_install) do + Mix.shell().info("No extensions to install") + :ok + else + {module, migration_name} = + case to_install do + [{ext_name, version, _up_fn, _down_fn}] -> + {"install_#{ext_name}_v#{version}", + "#{timestamp(true)}_install_#{ext_name}_v#{version}_extension"} + + [single] -> + {"install_#{single}", "#{timestamp(true)}_install_#{single}_extension"} + + multiple -> + {"install_#{Enum.count(multiple)}_extensions", + "#{timestamp(true)}_install_#{Enum.count(multiple)}_extensions"} + end + + migration_file = + opts + |> migration_path(repo) + |> Path.join(migration_name <> ".exs") + + sanitized_module = + module + |> String.replace("-", "_") + |> Macro.camelize() + + module_name = Module.concat([repo, Migrations, sanitized_module]) + + install = + Enum.map_join(to_install, "\n", fn + {_ext_name, version, up_fn, _down_fn} when is_function(up_fn, 1) -> + up_fn.(version) + + extension -> + raise "only custom extensions supported currently. Got #{inspect(extension)}" + end) + + uninstall = + Enum.map_join(to_install, "\n", fn + {_ext_name, version, _up_fn, down_fn} when is_function(down_fn, 1) -> + down_fn.(version) + + extension -> + raise "only custom extensions supported currently. Got #{inspect(extension)}" + end) + + contents = """ + defmodule #{inspect(module_name)} do + @moduledoc \"\"\" + Installs any extensions that are mentioned in the repo's `installed_extensions/0` callback + + This file was autogenerated with `mix ash_sqlite.generate_migrations` + \"\"\" + + use Ecto.Migration + + def up do + #{install} + end + + def down do + # Uncomment this if you actually want to uninstall the extensions + # when this migration is rolled back: + #{uninstall} + end + end + """ + + installed = Enum.map(requesteds, fn {name, _extension} -> name end) + + snapshot_contents = + Jason.encode!( + %{ + installed: installed + }, + pretty: true + ) + + contents = format(contents, opts) + create_file(snapshot_file, snapshot_contents, force: true) + create_file(migration_file, contents) + end + end + end + + defp create_migrations(snapshots, opts) do + snapshots + |> Enum.group_by(& &1.repo) + |> Enum.each(fn {repo, snapshots} -> + deduped = deduplicate_snapshots(snapshots, opts) + + snapshots_with_operations = + deduped + |> fetch_operations(opts) + |> Enum.map(&add_order_to_operations/1) + + snapshots = Enum.map(snapshots_with_operations, &elem(&1, 0)) + + snapshots_with_operations + |> Enum.flat_map(&elem(&1, 1)) + |> Enum.uniq() + |> case do + [] -> + Mix.shell().info( + "No changes detected, so no migrations or snapshots have been created." + ) + + :ok + + operations -> + if opts.check do + IO.puts(""" + Migrations would have been generated, but the --check flag was provided. + + To see what migration would have been generated, run with the `--dry-run` + option instead. 
To generate those migrations, run without either flag. + """) + + exit({:shutdown, 1}) + end + + operations + |> organize_operations + |> build_up_and_down() + |> write_migration!(repo, opts) + + create_new_snapshot(snapshots, repo_name(repo), opts) + end + end) + end + + defp add_order_to_operations({snapshot, operations}) do + operations_with_order = Enum.map(operations, &add_order_to_operation(&1, snapshot.attributes)) + + {snapshot, operations_with_order} + end + + defp add_order_to_operation(%{attribute: attribute} = op, attributes) do + order = Enum.find_index(attributes, &(&1.source == attribute.source)) + attribute = Map.put(attribute, :order, order) + + %{op | attribute: attribute} + end + + defp add_order_to_operation(%{new_attribute: attribute} = op, attributes) do + order = Enum.find_index(attributes, &(&1.source == attribute.source)) + attribute = Map.put(attribute, :order, order) + + %{op | new_attribute: attribute} + end + + defp add_order_to_operation(op, _), do: op + + defp organize_operations([]), do: [] + + defp organize_operations(operations) do + operations + |> sort_operations() + |> streamline() + |> group_into_phases() + |> clean_phases() + end + + defp clean_phases(phases) do + phases + |> Enum.flat_map(fn + %{operations: []} -> + [] + + %{operations: operations} = phase -> + if Enum.all?(operations, &match?(%{commented?: true}, &1)) do + [%{phase | commented?: true}] + else + [phase] + end + + op -> + [op] + end) + end + + defp deduplicate_snapshots(snapshots, opts, existing_snapshots \\ []) do + grouped = + snapshots + |> Enum.group_by(fn snapshot -> + snapshot.table + end) + + old_snapshots = + Map.new(grouped, fn {key, [snapshot | _]} -> + old_snapshot = + if opts.no_shell? do + Enum.find(existing_snapshots, &(&1.table == snapshot.table)) + else + get_existing_snapshot(snapshot, opts) + end + + { + key, + old_snapshot + } + end) + + old_snapshots_list = Map.values(old_snapshots) + + old_snapshots = + Map.new(old_snapshots, fn {key, old_snapshot} -> + if old_snapshot do + {key, add_references_primary_key(old_snapshot, old_snapshots_list)} + else + {key, old_snapshot} + end + end) + + grouped + |> Enum.map(fn {key, [snapshot | _] = snapshots} -> + existing_snapshot = old_snapshots[key] + + {primary_key, identities} = merge_primary_keys(existing_snapshot, snapshots, opts) + + attributes = Enum.flat_map(snapshots, & &1.attributes) + + count_with_create = Enum.count(snapshots, & &1.has_create_action) + + new_snapshot = %{ + snapshot + | attributes: merge_attributes(attributes, snapshot.table, count_with_create), + identities: snapshots |> Enum.flat_map(& &1.identities) |> Enum.uniq(), + custom_indexes: snapshots |> Enum.flat_map(& &1.custom_indexes) |> Enum.uniq(), + custom_statements: snapshots |> Enum.flat_map(& &1.custom_statements) |> Enum.uniq() + } + + all_identities = + new_snapshot.identities + |> Kernel.++(identities) + |> Enum.sort_by(& &1.name) + # We sort the identities by there being an identity with a matching name in the existing snapshot + # so that we prefer identities that currently exist over new ones + |> Enum.sort_by(fn identity -> + existing_snapshot + |> Kernel.||(%{}) + |> Map.get(:identities, []) + |> Enum.any?(fn existing_identity -> + existing_identity.name == identity.name + end) + |> Kernel.!() + end) + |> Enum.uniq_by(fn identity -> + {Enum.sort(identity.keys), identity.base_filter} + end) + + new_snapshot = %{new_snapshot | identities: all_identities} + + { + %{ + new_snapshot + | attributes: + Enum.map(new_snapshot.attributes, fn 
attribute -> + if attribute.source in primary_key do + %{attribute | primary_key?: true} + else + %{attribute | primary_key?: false} + end + end) + }, + existing_snapshot + } + end) + end + + defp merge_attributes(attributes, table, count) do + attributes + |> Enum.with_index() + |> Enum.map(fn {attr, i} -> Map.put(attr, :order, i) end) + |> Enum.group_by(& &1.source) + |> Enum.map(fn {source, attributes} -> + size = + attributes + |> Enum.map(& &1.size) + |> Enum.filter(& &1) + |> case do + [] -> + nil + + sizes -> + Enum.max(sizes) + end + + %{ + source: source, + type: merge_types(Enum.map(attributes, & &1.type), source, table), + size: size, + default: merge_defaults(Enum.map(attributes, & &1.default)), + allow_nil?: Enum.any?(attributes, & &1.allow_nil?) || Enum.count(attributes) < count, + generated?: Enum.any?(attributes, & &1.generated?), + references: merge_references(Enum.map(attributes, & &1.references), source, table), + primary_key?: false, + order: attributes |> Enum.map(& &1.order) |> Enum.min() + } + end) + |> Enum.sort(&(&1.order < &2.order)) + |> Enum.map(&Map.drop(&1, [:order])) + end + + defp merge_references(references, name, table) do + references + |> Enum.reject(&is_nil/1) + |> Enum.uniq() + |> case do + [] -> + nil + + references -> + %{ + destination_attribute: merge_uniq!(references, table, :destination_attribute, name), + deferrable: merge_uniq!(references, table, :deferrable, name), + destination_attribute_default: + merge_uniq!(references, table, :destination_attribute_default, name), + destination_attribute_generated: + merge_uniq!(references, table, :destination_attribute_generated, name), + multitenancy: merge_uniq!(references, table, :multitenancy, name), + primary_key?: merge_uniq!(references, table, :primary_key?, name), + on_delete: merge_uniq!(references, table, :on_delete, name), + on_update: merge_uniq!(references, table, :on_update, name), + name: merge_uniq!(references, table, :name, name), + table: merge_uniq!(references, table, :table, name) + } + end + end + + defp merge_uniq!(references, table, field, attribute) do + references + |> Enum.map(&Map.get(&1, field)) + |> Enum.reject(&is_nil/1) + |> Enum.uniq() + |> case do + [] -> + nil + + [value] -> + value + + values -> + values = Enum.map_join(values, "\n", &" * #{inspect(&1)}") + + raise """ + Conflicting configurations for references for #{table}.#{attribute}: + + Values: + + #{values} + """ + end + end + + defp merge_types(types, name, table) do + types + |> Enum.uniq() + |> case do + [type] -> + type + + types -> + raise "Conflicting types for table `#{table}.#{name}`: #{inspect(types)}" + end + end + + defp merge_defaults(defaults) do + defaults + |> Enum.uniq() + |> case do + [default] -> default + _ -> "nil" + end + end + + defp merge_primary_keys(nil, [snapshot | _] = snapshots, opts) do + snapshots + |> Enum.map(&pkey_names(&1.attributes)) + |> Enum.uniq() + |> case do + [pkey_names] -> + {pkey_names, []} + + unique_primary_keys -> + unique_primary_key_names = + unique_primary_keys + |> Enum.with_index() + |> Enum.map_join("\n", fn {pkey, index} -> + "#{index}: #{inspect(pkey)}" + end) + + choice = + if opts.no_shell? do + raise "Unimplemented: cannot resolve primary key ambiguity without shell input" + else + message = """ + Which primary key should be used for the table `#{snapshot.table}` (enter the number)? 
+ + #{unique_primary_key_names} + """ + + message + |> Mix.shell().prompt() + |> String.to_integer() + end + + identities = + unique_primary_keys + |> List.delete_at(choice) + |> Enum.map(fn pkey_names -> + pkey_name_string = Enum.join(pkey_names, "_") + name = snapshot.table <> "_" <> pkey_name_string + + %{ + keys: pkey_names, + name: name + } + end) + + primary_key = Enum.sort(Enum.at(unique_primary_keys, choice)) + + identities = + Enum.reject(identities, fn identity -> + Enum.sort(identity.keys) == primary_key + end) + + {primary_key, identities} + end + end + + defp merge_primary_keys(existing_snapshot, snapshots, opts) do + pkey_names = pkey_names(existing_snapshot.attributes) + + one_pkey_exists? = + Enum.any?(snapshots, fn snapshot -> + pkey_names(snapshot.attributes) == pkey_names + end) + + if one_pkey_exists? do + identities = + snapshots + |> Enum.map(&pkey_names(&1.attributes)) + |> Enum.uniq() + |> Enum.reject(&(&1 == pkey_names)) + |> Enum.map(fn pkey_names -> + pkey_name_string = Enum.join(pkey_names, "_") + name = existing_snapshot.table <> "_" <> pkey_name_string + + %{ + keys: pkey_names, + name: name + } + end) + + {pkey_names, identities} + else + merge_primary_keys(nil, snapshots, opts) + end + end + + defp pkey_names(attributes) do + attributes + |> Enum.filter(& &1.primary_key?) + |> Enum.map(& &1.source) + |> Enum.sort() + end + + defp migration_path(opts, repo) do + repo_name = repo_name(repo) + # Copied from ecto's mix task, thanks Ecto ❤️ + config = repo.config() + app = Keyword.fetch!(config, :otp_app) + + if opts.migration_path do + opts.migration_path + else + Path.join([Mix.Project.deps_paths()[app] || File.cwd!(), "priv"]) + end + |> Path.join(repo_name) + |> Path.join("migrations") + end + + defp repo_name(repo) do + repo |> Module.split() |> List.last() |> Macro.underscore() + end + + defp write_migration!({up, down}, repo, opts) do + migration_path = migration_path(opts, repo) + + {migration_name, last_part} = + if opts.name do + {"#{timestamp(true)}_#{opts.name}", "#{opts.name}"} + else + count = + migration_path + |> Path.join("*_migrate_resources*") + |> Path.wildcard() + |> Enum.map(fn path -> + path + |> Path.basename() + |> String.split("_migrate_resources", parts: 2) + |> Enum.at(1) + |> Integer.parse() + |> case do + {integer, _} -> + integer + + _ -> + 0 + end + end) + |> Enum.max(fn -> 0 end) + |> Kernel.+(1) + + {"#{timestamp(true)}_migrate_resources#{count}", "migrate_resources#{count}"} + end + + migration_file = + migration_path + |> Path.join(migration_name <> ".exs") + + module_name = + Module.concat([repo, Migrations, Macro.camelize(last_part)]) + + contents = """ + defmodule #{inspect(module_name)} do + @moduledoc \"\"\" + Updates resources based on their most recent snapshots. + + This file was autogenerated with `mix ash_sqlite.generate_migrations` + \"\"\" + + use Ecto.Migration + + def up do + #{up} + end + + def down do + #{down} + end + end + """ + + try do + contents = format(contents, opts) + + if opts.dry_run do + Mix.shell().info(contents) + else + create_file(migration_file, contents) + end + rescue + exception -> + reraise( + """ + Exception while formatting generated code: + #{Exception.format(:error, exception, __STACKTRACE__)} + + Code: + + #{add_line_numbers(contents)} + + To generate it unformatted anyway, but manually fix it, use the `--no-format` option. 
+ """, + __STACKTRACE__ + ) + end + end + + defp add_line_numbers(contents) do + lines = String.split(contents, "\n") + + digits = String.length(to_string(Enum.count(lines))) + + lines + |> Enum.with_index() + |> Enum.map_join("\n", fn {line, index} -> + "#{String.pad_trailing(to_string(index), digits, " ")} | #{line}" + end) + end + + defp create_new_snapshot(snapshots, repo_name, opts) do + unless opts.dry_run do + Enum.each(snapshots, fn snapshot -> + snapshot_binary = snapshot_to_binary(snapshot) + + snapshot_folder = + opts + |> snapshot_path(snapshot.repo) + |> Path.join(repo_name) + + snapshot_file = Path.join(snapshot_folder, "#{snapshot.table}/#{timestamp()}.json") + + File.mkdir_p(Path.dirname(snapshot_file)) + File.write!(snapshot_file, snapshot_binary, []) + + old_snapshot_folder = Path.join(snapshot_folder, "#{snapshot.table}.json") + + if File.exists?(old_snapshot_folder) do + new_snapshot_folder = Path.join(snapshot_folder, "#{snapshot.table}/initial.json") + File.rename(old_snapshot_folder, new_snapshot_folder) + end + end) + end + end + + @doc false + def build_up_and_down(phases) do + up = + Enum.map_join(phases, "\n", fn phase -> + phase + |> phase.__struct__.up() + |> Kernel.<>("\n") + |> maybe_comment(phase) + end) + + down = + phases + |> Enum.reverse() + |> Enum.map_join("\n", fn phase -> + phase + |> phase.__struct__.down() + |> Kernel.<>("\n") + |> maybe_comment(phase) + end) + + {up, down} + end + + defp maybe_comment(text, %{commented?: true}) do + text + |> String.split("\n") + |> Enum.map_join("\n", fn line -> + if String.starts_with?(line, "#") do + line + else + "# #{line}" + end + end) + end + + defp maybe_comment(text, _), do: text + + defp format(string, opts) do + if opts.format do + Code.format_string!(string, locals_without_parens: ecto_sql_locals_without_parens()) + else + string + end + rescue + exception -> + IO.puts(""" + Exception while formatting: + + #{inspect(exception)} + + #{inspect(string)} + """) + + reraise exception, __STACKTRACE__ + end + + defp ecto_sql_locals_without_parens do + path = File.cwd!() |> Path.join("deps/ecto_sql/.formatter.exs") + + if File.exists?(path) do + {opts, _} = Code.eval_file(path) + Keyword.get(opts, :locals_without_parens, []) + else + [] + end + end + + defp streamline(ops, acc \\ []) + defp streamline([], acc), do: Enum.reverse(acc) + + defp streamline( + [ + %Operation.AddAttribute{ + attribute: %{ + source: name + }, + table: table + } = add + | rest + ], + acc + ) do + rest + |> Enum.take_while(fn + %custom{} when custom in [Operation.AddCustomStatement, Operation.RemoveCustomStatement] -> + false + + op -> + op.table == table + end) + |> Enum.with_index() + |> Enum.find(fn + {%Operation.AlterAttribute{ + new_attribute: %{source: ^name, references: references}, + old_attribute: %{source: ^name} + }, _} + when not is_nil(references) -> + true + + _ -> + false + end) + |> case do + nil -> + streamline(rest, [add | acc]) + + {alter, index} -> + new_attribute = Map.put(add.attribute, :references, alter.new_attribute.references) + streamline(List.delete_at(rest, index), [%{add | attribute: new_attribute} | acc]) + end + end + + defp streamline([first | rest], acc) do + streamline(rest, [first | acc]) + end + + defp group_into_phases(ops, current \\ nil, acc \\ []) + + defp group_into_phases([], nil, acc), do: Enum.reverse(acc) + + defp group_into_phases([], phase, acc) do + phase = %{phase | operations: Enum.reverse(phase.operations)} + Enum.reverse([phase | acc]) + end + + defp group_into_phases( + [ + 
%Operation.CreateTable{table: table, multitenancy: multitenancy} | rest + ], + nil, + acc + ) do + # this is kind of a hack + {has_to_be_in_this_phase, rest} = + Enum.split_with(rest, fn + %Operation.AddAttribute{table: ^table} -> true + _ -> false + end) + + group_into_phases( + rest, + %Phase.Create{ + table: table, + multitenancy: multitenancy, + operations: has_to_be_in_this_phase + }, + acc + ) + end + + defp group_into_phases( + [%Operation.AddAttribute{table: table} = op | rest], + %{table: table} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases( + [%Operation.AlterAttribute{table: table} = op | rest], + %Phase.Alter{table: table} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases( + [%Operation.RenameAttribute{table: table} = op | rest], + %Phase.Alter{table: table} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases( + [%Operation.RemoveAttribute{table: table} = op | rest], + %{table: table} = phase, + acc + ) do + group_into_phases(rest, %{phase | operations: [op | phase.operations]}, acc) + end + + defp group_into_phases([%{no_phase: true} = op | rest], nil, acc) do + group_into_phases(rest, nil, [op | acc]) + end + + defp group_into_phases([operation | rest], nil, acc) do + phase = %Phase.Alter{ + operations: [operation], + multitenancy: operation.multitenancy, + table: operation.table + } + + group_into_phases(rest, phase, acc) + end + + defp group_into_phases(operations, phase, acc) do + phase = %{phase | operations: Enum.reverse(phase.operations)} + group_into_phases(operations, nil, [phase | acc]) + end + + defp sort_operations(ops, acc \\ []) + defp sort_operations([], acc), do: acc + + defp sort_operations([op | rest], []), do: sort_operations(rest, [op]) + + defp sort_operations([op | rest], acc) do + acc = Enum.reverse(acc) + + after_index = Enum.find_index(acc, &after?(op, &1)) + + new_acc = + if after_index do + acc + |> List.insert_at(after_index, op) + |> Enum.reverse() + else + [op | Enum.reverse(acc)] + end + + sort_operations(rest, new_acc) + end + + defp after?(_, %Operation.AlterDeferrability{direction: :down}), do: true + defp after?(%Operation.AlterDeferrability{direction: :up}, _), do: true + + defp after?( + %Operation.RemovePrimaryKey{}, + %Operation.DropForeignKey{} + ), + do: true + + defp after?( + %Operation.DropForeignKey{}, + %Operation.RemovePrimaryKey{} + ), + do: false + + defp after?(%Operation.RemovePrimaryKey{}, _), do: false + defp after?(_, %Operation.RemovePrimaryKey{}), do: true + defp after?(%Operation.RemovePrimaryKeyDown{}, _), do: true + defp after?(_, %Operation.RemovePrimaryKeyDown{}), do: false + + defp after?( + %Operation.AddCustomStatement{}, + _ + ), + do: true + + defp after?( + _, + %Operation.RemoveCustomStatement{} + ), + do: true + + defp after?( + %Operation.AddAttribute{attribute: %{order: l}, table: table}, + %Operation.AddAttribute{attribute: %{order: r}, table: table} + ), + do: l > r + + defp after?( + %Operation.RenameUniqueIndex{ + table: table + }, + %{table: table} + ) do + true + end + + defp after?( + %Operation.AddUniqueIndex{ + table: table + }, + %{table: table} + ) do + true + end + + defp after?( + %Operation.AddCustomIndex{ + table: table + }, + %Operation.AddAttribute{table: table} + ) do + true + end + + defp after?( + %Operation.RemoveUniqueIndex{table: 
table},
+         %Operation.AddUniqueIndex{table: table}
+       ) do
+    false
+  end
+
+  defp after?(
+         %Operation.RemoveUniqueIndex{table: table},
+         %{table: table}
+       ) do
+    true
+  end
+
+  defp after?(%Operation.AlterAttribute{table: table}, %Operation.DropForeignKey{
+         table: table,
+         direction: :up
+       }),
+       do: true
+
+  defp after?(
+         %Operation.AlterAttribute{table: table},
+         %Operation.DropForeignKey{
+           table: table,
+           direction: :down
+         }
+       ),
+       do: false
+
+  defp after?(
+         %Operation.DropForeignKey{
+           table: table,
+           direction: :down
+         },
+         %Operation.AlterAttribute{table: table}
+       ),
+       do: true
+
+  defp after?(%Operation.AddAttribute{table: table}, %Operation.CreateTable{
+         table: table
+       }) do
+    true
+  end
+
+  defp after?(
+         %Operation.AddAttribute{
+           attribute: %{
+             references: %{table: table, destination_attribute: name}
+           }
+         },
+         %Operation.AddAttribute{table: table, attribute: %{source: name}}
+       ),
+       do: true
+
+  defp after?(
+         %Operation.AddAttribute{
+           table: table,
+           attribute: %{
+             primary_key?: false
+           }
+         },
+         %Operation.AddAttribute{table: table, attribute: %{primary_key?: true}}
+       ),
+       do: true
+
+  defp after?(
+         %Operation.AddAttribute{
+           table: table,
+           attribute: %{
+             primary_key?: true
+           }
+         },
+         %Operation.RemoveAttribute{
+           table: table,
+           attribute: %{primary_key?: true}
+         }
+       ),
+       do: true
+
+  defp after?(
+         %Operation.AddAttribute{
+           table: table,
+           attribute: %{
+             primary_key?: true
+           }
+         },
+         %Operation.AlterAttribute{
+           table: table,
+           new_attribute: %{primary_key?: false},
+           old_attribute: %{primary_key?: true}
+         }
+       ),
+       do: true
+
+  defp after?(
+         %Operation.RemoveAttribute{
+           table: table,
+           attribute: %{primary_key?: true}
+         },
+         %Operation.AlterAttribute{
+           table: table,
+           new_attribute: %{
+             primary_key?: true
+           },
+           old_attribute: %{
+             primary_key?: false
+           }
+         }
+       ),
+       do: true
+
+  defp after?(
+         %Operation.AlterAttribute{
+           table: table,
+           new_attribute: %{primary_key?: false},
+           old_attribute: %{
+             primary_key?: true
+           }
+         },
+         %Operation.AlterAttribute{
+           table: table,
+           new_attribute: %{
+             primary_key?: true
+           },
+           old_attribute: %{
+             primary_key?: false
+           }
+         }
+       ),
+       do: true
+
+  defp after?(
+         %Operation.AlterAttribute{
+           table: table,
+           new_attribute: %{primary_key?: false},
+           old_attribute: %{
+             primary_key?: true
+           }
+         },
+         %Operation.AddAttribute{
+           table: table,
+           attribute: %{
+             primary_key?: true
+           }
+         }
+       ),
+       do: false
+
+  defp after?(
+         %Operation.AlterAttribute{
+           new_attribute: %{
+             references: %{destination_attribute: destination_attribute, table: table}
+           }
+         },
+         %Operation.AddUniqueIndex{identity: %{keys: keys}, table: table}
+       ) do
+    destination_attribute in keys
+  end
+
+  defp after?(
+         %Operation.AlterAttribute{
+           new_attribute: %{references: %{table: table, destination_attribute: source}}
+         },
+         %Operation.AlterAttribute{
+           new_attribute: %{
+             source: source
+           },
+           table: table
+         }
+       ) do
+    true
+  end
+
+  defp after?(
+         %Operation.AlterAttribute{
+           new_attribute: %{
+             source: source
+           },
+           table: table
+         },
+         %Operation.AlterAttribute{
new_attribute: %{references: %{table: table, destination_attribute: source}} + } + ) do + false + end + + defp after?( + %Operation.RemoveAttribute{attribute: %{source: source}, table: table}, + %Operation.AlterAttribute{ + old_attribute: %{ + references: %{table: table, destination_attribute: source} + } + } + ), + do: true + + defp after?( + %Operation.AlterAttribute{ + new_attribute: %{ + references: %{table: table, destination_attribute: name} + } + }, + %Operation.AddAttribute{table: table, attribute: %{source: name}} + ), + do: true + + defp after?( + %Operation.AlterAttribute{new_attribute: %{references: references}, table: table}, + %{table: table} + ) + when not is_nil(references), + do: true + + defp after?(_, _), do: false + + defp fetch_operations(snapshots, opts) do + snapshots + |> Enum.map(fn {snapshot, existing_snapshot} -> + {snapshot, do_fetch_operations(snapshot, existing_snapshot, opts)} + end) + |> Enum.reject(fn {_, ops} -> + Enum.empty?(ops) + end) + end + + defp do_fetch_operations(snapshot, existing_snapshot, opts, acc \\ []) + + defp do_fetch_operations(snapshot, nil, opts, acc) do + empty_snapshot = %{ + attributes: [], + identities: [], + custom_indexes: [], + custom_statements: [], + table: snapshot.table, + repo: snapshot.repo, + base_filter: nil, + empty?: true, + multitenancy: %{ + attribute: nil, + strategy: nil, + global: nil + } + } + + do_fetch_operations(snapshot, empty_snapshot, opts, [ + %Operation.CreateTable{ + table: snapshot.table, + multitenancy: snapshot.multitenancy, + old_multitenancy: empty_snapshot.multitenancy + } + | acc + ]) + end + + defp do_fetch_operations(snapshot, old_snapshot, opts, acc) do + attribute_operations = attribute_operations(snapshot, old_snapshot, opts) + pkey_operations = pkey_operations(snapshot, old_snapshot, attribute_operations) + + rewrite_all_identities? = changing_multitenancy_affects_identities?(snapshot, old_snapshot) + + custom_statements_to_add = + snapshot.custom_statements + |> Enum.reject(fn statement -> + Enum.any?(old_snapshot.custom_statements, &(&1.name == statement.name)) + end) + |> Enum.map(&%Operation.AddCustomStatement{statement: &1, table: snapshot.table}) + + custom_statements_to_remove = + old_snapshot.custom_statements + |> Enum.reject(fn old_statement -> + Enum.any?(snapshot.custom_statements, &(&1.name == old_statement.name)) + end) + |> Enum.map(&%Operation.RemoveCustomStatement{statement: &1, table: snapshot.table}) + + custom_statements_to_alter = + snapshot.custom_statements + |> Enum.flat_map(fn statement -> + old_statement = Enum.find(old_snapshot.custom_statements, &(&1.name == statement.name)) + + if old_statement && + (old_statement.code? != statement.code? || + old_statement.up != statement.up || old_statement.down != statement.down) do + [ + %Operation.RemoveCustomStatement{statement: old_statement, table: snapshot.table}, + %Operation.AddCustomStatement{statement: statement, table: snapshot.table} + ] + else + [] + end + end) + + custom_indexes_to_add = + Enum.filter(snapshot.custom_indexes, fn index -> + !Enum.find(old_snapshot.custom_indexes, fn old_custom_index -> + indexes_match?(snapshot.table, old_custom_index, index) + end) + end) + |> Enum.map(fn custom_index -> + %Operation.AddCustomIndex{ + index: custom_index, + table: snapshot.table, + multitenancy: snapshot.multitenancy, + base_filter: snapshot.base_filter + } + end) + + custom_indexes_to_remove = + Enum.filter(old_snapshot.custom_indexes, fn old_custom_index -> + rewrite_all_identities? 
|| + !Enum.find(snapshot.custom_indexes, fn index -> + indexes_match?(snapshot.table, old_custom_index, index) + end) + end) + |> Enum.map(fn custom_index -> + %Operation.RemoveCustomIndex{ + index: custom_index, + table: old_snapshot.table, + multitenancy: old_snapshot.multitenancy, + base_filter: old_snapshot.base_filter + } + end) + + unique_indexes_to_remove = + if rewrite_all_identities? do + old_snapshot.identities + else + Enum.reject(old_snapshot.identities, fn old_identity -> + Enum.find(snapshot.identities, fn identity -> + identity.name == old_identity.name && + Enum.sort(old_identity.keys) == Enum.sort(identity.keys) && + old_identity.base_filter == identity.base_filter + end) + end) + end + |> Enum.map(fn identity -> + %Operation.RemoveUniqueIndex{ + identity: identity, + table: snapshot.table + } + end) + + unique_indexes_to_rename = + if rewrite_all_identities? do + [] + else + snapshot.identities + |> Enum.map(fn identity -> + Enum.find_value(old_snapshot.identities, fn old_identity -> + if old_identity.name == identity.name && + old_identity.index_name != identity.index_name do + {old_identity, identity} + end + end) + end) + |> Enum.filter(& &1) + end + |> Enum.map(fn {old_identity, new_identity} -> + %Operation.RenameUniqueIndex{ + old_identity: old_identity, + new_identity: new_identity, + table: snapshot.table + } + end) + + unique_indexes_to_add = + if rewrite_all_identities? do + snapshot.identities + else + Enum.reject(snapshot.identities, fn identity -> + Enum.find(old_snapshot.identities, fn old_identity -> + old_identity.name == identity.name && + Enum.sort(old_identity.keys) == Enum.sort(identity.keys) && + old_identity.base_filter == identity.base_filter + end) + end) + end + |> Enum.map(fn identity -> + %Operation.AddUniqueIndex{ + identity: identity, + table: snapshot.table + } + end) + + [ + pkey_operations, + unique_indexes_to_remove, + attribute_operations, + unique_indexes_to_add, + unique_indexes_to_rename, + custom_indexes_to_add, + custom_indexes_to_remove, + custom_statements_to_add, + custom_statements_to_remove, + custom_statements_to_alter, + acc + ] + |> Enum.concat() + |> Enum.map(&Map.put(&1, :multitenancy, snapshot.multitenancy)) + |> Enum.map(&Map.put(&1, :old_multitenancy, old_snapshot.multitenancy)) + end + + defp indexes_match?(table, left, right) do + left = + left + |> Map.update!(:fields, fn fields -> + Enum.map(fields, &to_string/1) + end) + |> add_custom_index_name(table) + + right = + right + |> Map.update!(:fields, fn fields -> + Enum.map(fields, &to_string/1) + end) + |> add_custom_index_name(table) + + left == right + end + + defp add_custom_index_name(custom_index, table) do + custom_index + |> Map.put_new_lazy(:name, fn -> + AshSqlite.CustomIndex.name(table, %{fields: custom_index.fields}) + end) + |> Map.update!( + :name, + &(&1 || AshSqlite.CustomIndex.name(table, %{fields: custom_index.fields})) + ) + end + + defp pkey_operations(snapshot, old_snapshot, attribute_operations) do + if old_snapshot[:empty?] do + [] + else + must_drop_pkey? = + Enum.any?( + attribute_operations, + fn + %Operation.AlterAttribute{ + old_attribute: %{primary_key?: old_primary_key}, + new_attribute: %{primary_key?: new_primary_key} + } + when old_primary_key != new_primary_key -> + true + + %Operation.AddAttribute{ + attribute: %{primary_key?: true} + } -> + true + + _ -> + false + end + ) + + if must_drop_pkey? 
do + [ + %Operation.RemovePrimaryKey{table: snapshot.table}, + %Operation.RemovePrimaryKeyDown{table: snapshot.table} + ] + else + [] + end + end + end + + defp attribute_operations(snapshot, old_snapshot, opts) do + attributes_to_add = + Enum.reject(snapshot.attributes, fn attribute -> + Enum.find(old_snapshot.attributes, &(&1.source == attribute.source)) + end) + + attributes_to_remove = + Enum.reject(old_snapshot.attributes, fn attribute -> + Enum.find(snapshot.attributes, &(&1.source == attribute.source)) + end) + + {attributes_to_add, attributes_to_remove, attributes_to_rename} = + resolve_renames(snapshot.table, attributes_to_add, attributes_to_remove, opts) + + attributes_to_alter = + snapshot.attributes + |> Enum.map(fn attribute -> + {attribute, + Enum.find( + old_snapshot.attributes, + &(&1.source == attribute.source && + attributes_unequal?(&1, attribute, snapshot.repo, old_snapshot, snapshot)) + )} + end) + |> Enum.filter(&elem(&1, 1)) + + rename_attribute_events = + Enum.map(attributes_to_rename, fn {new, old} -> + %Operation.RenameAttribute{ + new_attribute: new, + old_attribute: old, + table: snapshot.table + } + end) + + add_attribute_events = + Enum.flat_map(attributes_to_add, fn attribute -> + if attribute.references do + [ + %Operation.AddAttribute{ + attribute: attribute, + table: snapshot.table + }, + %Operation.DropForeignKey{ + attribute: attribute, + table: snapshot.table, + multitenancy: Map.get(attribute, :multitenancy), + direction: :down + } + ] + else + [ + %Operation.AddAttribute{ + attribute: attribute, + table: snapshot.table + } + ] + end + end) + + alter_attribute_events = + Enum.flat_map(attributes_to_alter, fn {new_attribute, old_attribute} -> + deferrable_ops = + if differently_deferrable?(new_attribute, old_attribute) do + [ + %Operation.AlterDeferrability{ + table: snapshot.table, + references: new_attribute.references, + direction: :up + }, + %Operation.AlterDeferrability{ + table: snapshot.table, + references: Map.get(old_attribute, :references), + direction: :down + } + ] + else + [] + end + + if has_reference?(old_snapshot.multitenancy, old_attribute) and + Map.get(old_attribute, :references) != Map.get(new_attribute, :references) do + redo_deferrability = + if differently_deferrable?(new_attribute, old_attribute) do + [] + else + [ + %Operation.AlterDeferrability{ + table: snapshot.table, + references: new_attribute.references, + direction: :up + } + ] + end + + old_and_alter = + [ + %Operation.DropForeignKey{ + attribute: old_attribute, + table: snapshot.table, + multitenancy: old_snapshot.multitenancy, + direction: :up + }, + %Operation.AlterAttribute{ + new_attribute: new_attribute, + old_attribute: old_attribute, + table: snapshot.table + } + ] ++ redo_deferrability + + if has_reference?(snapshot.multitenancy, new_attribute) do + reference_ops = [ + %Operation.DropForeignKey{ + attribute: new_attribute, + table: snapshot.table, + multitenancy: snapshot.multitenancy, + direction: :down + } + ] + + old_and_alter ++ + reference_ops + else + old_and_alter + end + else + [ + %Operation.AlterAttribute{ + new_attribute: Map.delete(new_attribute, :references), + old_attribute: Map.delete(old_attribute, :references), + table: snapshot.table + } + ] + end + |> Enum.concat(deferrable_ops) + end) + + remove_attribute_events = + Enum.map(attributes_to_remove, fn attribute -> + %Operation.RemoveAttribute{ + attribute: attribute, + table: snapshot.table, + commented?: !opts.drop_columns + } + end) + + add_attribute_events ++ + alter_attribute_events ++ 
remove_attribute_events ++ rename_attribute_events + end + + defp differently_deferrable?(%{references: %{deferrable: left}}, %{ + references: %{deferrable: right} + }) + when left != right do + true + end + + defp differently_deferrable?(%{references: %{deferrable: same}}, %{ + references: %{deferrable: same} + }) do + false + end + + defp differently_deferrable?(%{references: %{deferrable: left}}, _) when left != false, do: true + + defp differently_deferrable?(_, %{references: %{deferrable: right}}) when right != false, + do: true + + defp differently_deferrable?(_, _), do: false + + # This exists to handle the fact that the remapping of the key name -> source caused attributes + # to be considered unequal. We ignore things that only differ in that way using this function. + defp attributes_unequal?(left, right, repo, _old_snapshot, _new_snapshot) do + left = clean_for_equality(left, repo) + + right = clean_for_equality(right, repo) + + left != right + end + + defp clean_for_equality(attribute, _repo) do + cond do + attribute[:source] -> + Map.put(attribute, :name, attribute[:source]) + |> Map.update!(:source, &to_string/1) + |> Map.update!(:name, &to_string/1) + + attribute[:name] -> + attribute + |> Map.put(:source, attribute[:name]) + |> Map.update!(:source, &to_string/1) + |> Map.update!(:name, &to_string/1) + + true -> + attribute + end + |> add_ignore() + |> then(fn + # only :integer cares about `destination_attribute_generated` + # so we clean it here to avoid generating unnecessary snapshots + # during the transitionary period of adding it + %{type: type, references: references} = attribute + when not is_nil(references) and type != :integer -> + Map.update!(attribute, :references, &Map.delete(&1, :destination_attribute_generated)) + + attribute -> + attribute + end) + end + + defp add_ignore(%{references: references} = attribute) when is_map(references) do + %{attribute | references: Map.put_new(references, :ignore?, false)} + end + + defp add_ignore(attribute) do + attribute + end + + def changing_multitenancy_affects_identities?(snapshot, old_snapshot) do + snapshot.multitenancy != old_snapshot.multitenancy || + snapshot.base_filter != old_snapshot.base_filter + end + + def has_reference?(_multitenancy, attribute) do + not is_nil(Map.get(attribute, :references)) + end + + def get_existing_snapshot(snapshot, opts) do + repo_name = snapshot.repo |> Module.split() |> List.last() |> Macro.underscore() + + folder = + opts + |> snapshot_path(snapshot.repo) + |> Path.join(repo_name) + + snapshot_folder = Path.join(folder, snapshot.table) + + if File.exists?(snapshot_folder) do + snapshot_folder + |> File.ls!() + |> Enum.filter(&String.ends_with?(&1, ".json")) + |> Enum.map(&String.trim_trailing(&1, ".json")) + |> Enum.map(&Integer.parse/1) + |> Enum.filter(fn + {_int, remaining} -> + remaining == "" + + :error -> + false + end) + |> Enum.map(&elem(&1, 0)) + |> case do + [] -> + get_old_snapshot(folder, snapshot) + + timestamps -> + timestamp = Enum.max(timestamps) + snapshot_file = Path.join(snapshot_folder, "#{timestamp}.json") + + snapshot_file + |> File.read!() + |> load_snapshot() + end + else + get_old_snapshot(folder, snapshot) + end + end + + defp get_old_snapshot(folder, snapshot) do + old_snapshot_file = Path.join(folder, "#{snapshot.table}.json") + # This is adapter code for the old version, where migrations were stored in a flat directory + if File.exists?(old_snapshot_file) do + old_snapshot_file + |> File.read!() + |> load_snapshot() + end + end + + defp 
resolve_renames(_table, adding, [], _opts), do: {adding, [], []} + + defp resolve_renames(_table, [], removing, _opts), do: {[], removing, []} + + defp resolve_renames(table, [adding], [removing], opts) do + if renaming_to?(table, removing.source, adding.source, opts) do + {[], [], [{adding, removing}]} + else + {[adding], [removing], []} + end + end + + defp resolve_renames(table, adding, [removing | rest], opts) do + {new_adding, new_removing, new_renames} = + if renaming?(table, removing, opts) do + new_attribute = + if opts.no_shell? do + raise "Unimplemented: Cannot get new_attribute without the shell!" + else + get_new_attribute(adding) + end + + {adding -- [new_attribute], [], [{new_attribute, removing}]} + else + {adding, [removing], []} + end + + {rest_adding, rest_removing, rest_renames} = resolve_renames(table, new_adding, rest, opts) + + {new_adding ++ rest_adding, new_removing ++ rest_removing, rest_renames ++ new_renames} + end + + defp renaming_to?(table, removing, adding, opts) do + if opts.no_shell? do + raise "Unimplemented: cannot determine: Are you renaming #{table}.#{removing} to #{table}.#{adding}? without shell input" + else + Mix.shell().yes?("Are you renaming #{table}.#{removing} to #{table}.#{adding}?") + end + end + + defp renaming?(table, removing, opts) do + if opts.no_shell? do + raise "Unimplemented: cannot determine: Are you renaming #{table}.#{removing.source}? without shell input" + else + Mix.shell().yes?("Are you renaming #{table}.#{removing.source}?") + end + end + + defp get_new_attribute(adding, tries \\ 3) + + defp get_new_attribute(_adding, 0) do + raise "Could not get matching name after 3 attempts." + end + + defp get_new_attribute(adding, tries) do + name = + Mix.shell().prompt( + "What are you renaming it to?: #{Enum.map_join(adding, ", ", & &1.source)}" + ) + + name = + if name do + String.trim(name) + else + nil + end + + case Enum.find(adding, &(to_string(&1.source) == name)) do + nil -> get_new_attribute(adding, tries - 1) + new_attribute -> new_attribute + end + end + + defp timestamp(require_unique? \\ false) do + # Alright, this is silly I know. But migration ids need to be unique + # and "synthesizing" that behavior is significantly more annoying than + # just waiting a bit, ensuring the migration versions are unique. 
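+    # Sleeping for 1.5s guarantees that the seconds component of the next
+    # `:calendar.universal_time/0` reading differs from the previous one.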
+    if require_unique?, do: :timer.sleep(1500)
+    {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time()
+    "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}"
+  end
+
+  defp pad(i) when i < 10, do: <<?0, ?0 + i>>
+  defp pad(i), do: to_string(i)
+
+  def get_snapshots(resource, all_resources) do
+    Code.ensure_compiled!(AshSqlite.DataLayer.Info.repo(resource))
+
+    if AshSqlite.DataLayer.Info.polymorphic?(resource) do
+      all_resources
+      |> Enum.flat_map(&Ash.Resource.Info.relationships/1)
+      |> Enum.filter(&(&1.destination == resource))
+      |> Enum.reject(&(&1.type == :belongs_to))
+      |> Enum.filter(& &1.context[:data_layer][:table])
+      |> Enum.uniq()
+      |> Enum.map(fn relationship ->
+        resource
+        |> do_snapshot(relationship.context[:data_layer][:table])
+        |> Map.update!(:identities, fn identities ->
+          identity_index_names = AshSqlite.DataLayer.Info.identity_index_names(resource)
+
+          Enum.map(identities, fn identity ->
+            Map.put(
+              identity,
+              :index_name,
+              identity_index_names[identity.name] ||
+                "#{relationship.context[:data_layer][:table]}_#{identity.name}_index"
+            )
+          end)
+        end)
+        |> Map.update!(:attributes, fn attributes ->
+          Enum.map(attributes, fn attribute ->
+            destination_attribute_source =
+              relationship.destination
+              |> Ash.Resource.Info.attribute(relationship.destination_attribute)
+              |> Map.get(:source)
+
+            if attribute.source == destination_attribute_source do
+              source_attribute =
+                Ash.Resource.Info.attribute(relationship.source, relationship.source_attribute)
+
+              Map.put(attribute, :references, %{
+                destination_attribute: source_attribute.source,
+                destination_attribute_default:
+                  default(
+                    source_attribute,
+                    relationship.destination,
+                    AshSqlite.DataLayer.Info.repo(relationship.destination)
+                  ),
+                deferrable: false,
+                destination_attribute_generated: source_attribute.generated?,
+                multitenancy: multitenancy(relationship.source),
+                table: AshSqlite.DataLayer.Info.table(relationship.source),
+                on_delete: AshSqlite.DataLayer.Info.polymorphic_on_delete(relationship.source),
+                on_update: AshSqlite.DataLayer.Info.polymorphic_on_update(relationship.source),
+                primary_key?: source_attribute.primary_key?,
+                name:
+                  AshSqlite.DataLayer.Info.polymorphic_name(relationship.source) ||
+                    "#{relationship.context[:data_layer][:table]}_#{destination_attribute_source}_fkey"
+              })
+            else
+              attribute
+            end
+          end)
+        end)
+      end)
+    else
+      [do_snapshot(resource, AshSqlite.DataLayer.Info.table(resource))]
+    end
+  end
+
+  defp do_snapshot(resource, table) do
+    snapshot = %{
+      attributes: attributes(resource, table),
+      identities: identities(resource),
+      table: table || AshSqlite.DataLayer.Info.table(resource),
+      custom_indexes: custom_indexes(resource),
+      custom_statements: custom_statements(resource),
+      repo: AshSqlite.DataLayer.Info.repo(resource),
+      multitenancy: multitenancy(resource),
+      base_filter: AshSqlite.DataLayer.Info.base_filter_sql(resource),
+      has_create_action: has_create_action?(resource)
+    }
+
+    hash =
+      :sha256
+      |> :crypto.hash(inspect(snapshot))
+      |> Base.encode16()
+
+    Map.put(snapshot, :hash, hash)
+  end
+
+  defp has_create_action?(resource) do
+    resource
+    |> Ash.Resource.Info.actions()
+    |> Enum.any?(&(&1.type == :create && !&1.manual))
+  end
+
+  defp custom_indexes(resource) do
+    resource
+    |> AshSqlite.DataLayer.Info.custom_indexes()
+    |> Enum.map(fn custom_index ->
+      Map.take(custom_index, AshSqlite.CustomIndex.fields())
+    end)
+  end
+
+  defp custom_statements(resource) do
+    resource
+    |> AshSqlite.DataLayer.Info.custom_statements()
+    |> Enum.map(fn custom_statement ->
Map.take(custom_statement, AshSqlite.Statement.fields()) + end) + end + + defp multitenancy(resource) do + strategy = Ash.Resource.Info.multitenancy_strategy(resource) + attribute = Ash.Resource.Info.multitenancy_attribute(resource) + global = Ash.Resource.Info.multitenancy_global?(resource) + + %{ + strategy: strategy, + attribute: attribute, + global: global + } + end + + defp attributes(resource, table) do + repo = AshSqlite.DataLayer.Info.repo(resource) + ignored = AshSqlite.DataLayer.Info.migration_ignore_attributes(resource) || [] + + resource + |> Ash.Resource.Info.attributes() + |> Enum.reject(&(&1.name in ignored)) + |> Enum.map( + &Map.take(&1, [ + :name, + :source, + :type, + :default, + :allow_nil?, + :generated?, + :primary_key?, + :constraints + ]) + ) + |> Enum.map(fn attribute -> + default = default(attribute, resource, repo) + + type = + AshSqlite.DataLayer.Info.migration_types(resource)[attribute.name] || + migration_type(attribute.type, attribute.constraints) + + type = + if :erlang.function_exported(repo, :override_migration_type, 1) do + repo.override_migration_type(type) + else + type + end + + {type, size} = + case type do + {:varchar, size} -> + {:varchar, size} + + {:binary, size} -> + {:binary, size} + + {other, size} when is_atom(other) and is_integer(size) -> + {other, size} + + other -> + {other, nil} + end + + attribute + |> Map.put(:default, default) + |> Map.put(:size, size) + |> Map.put(:type, type) + |> Map.put(:source, attribute.source || attribute.name) + |> Map.drop([:name, :constraints]) + end) + |> Enum.map(fn attribute -> + references = find_reference(resource, table, attribute) + + Map.put(attribute, :references, references) + end) + end + + defp find_reference(resource, table, attribute) do + Enum.find_value(Ash.Resource.Info.relationships(resource), fn relationship -> + source_attribute_name = + relationship.source + |> Ash.Resource.Info.attribute(relationship.source_attribute) + |> then(fn attribute -> + attribute.source || attribute.name + end) + + if attribute.source == source_attribute_name && relationship.type == :belongs_to && + foreign_key?(relationship) do + configured_reference = + configured_reference(resource, table, attribute.source || attribute.name, relationship) + + unless Map.get(configured_reference, :ignore?) do + destination_attribute = + Ash.Resource.Info.attribute( + relationship.destination, + relationship.destination_attribute + ) + + destination_attribute_source = + destination_attribute.source || destination_attribute.name + + %{ + destination_attribute: destination_attribute_source, + deferrable: configured_reference.deferrable, + multitenancy: multitenancy(relationship.destination), + on_delete: configured_reference.on_delete, + on_update: configured_reference.on_update, + name: configured_reference.name, + primary_key?: destination_attribute.primary_key?, + table: + relationship.context[:data_layer][:table] || + AshSqlite.DataLayer.Info.table(relationship.destination) + } + end + end + end) + end + + defp configured_reference(resource, table, attribute, relationship) do + ref = + resource + |> AshSqlite.DataLayer.Info.references() + |> Enum.find(&(&1.relationship == relationship.name)) + |> Kernel.||(%{ + on_delete: nil, + on_update: nil, + deferrable: false, + name: nil, + ignore?: false + }) + + ref + |> Map.put(:name, ref.name || "#{table}_#{attribute}_fkey") + |> Map.put( + :primary_key?, + Ash.Resource.Info.attribute( + relationship.destination, + relationship.destination_attribute + ).primary_key? 
+    )
+  end
+
+  def get_migration_type(type, constraints), do: migration_type(type, constraints)
+
+  defp migration_type({:array, type}, constraints),
+    do: {:array, migration_type(type, constraints)}
+
+  defp migration_type(Ash.Type.CiString, _), do: :citext
+  defp migration_type(Ash.Type.UUID, _), do: :uuid
+  defp migration_type(Ash.Type.Integer, _), do: :bigint
+
+  defp migration_type(other, constraints) do
+    type = Ash.Type.get_type(other)
+
+    migration_type_from_storage_type(Ash.Type.storage_type(type, constraints))
+  end
+
+  defp migration_type_from_storage_type(:string), do: :text
+  defp migration_type_from_storage_type(:ci_string), do: :citext
+  defp migration_type_from_storage_type(storage_type), do: storage_type
+
+  defp foreign_key?(relationship) do
+    Ash.DataLayer.data_layer(relationship.source) == AshSqlite.DataLayer &&
+      AshSqlite.DataLayer.Info.repo(relationship.source) ==
+        AshSqlite.DataLayer.Info.repo(relationship.destination)
+  end
+
+  defp identities(resource) do
+    identity_index_names = AshSqlite.DataLayer.Info.identity_index_names(resource)
+
+    resource
+    |> Ash.Resource.Info.identities()
+    |> case do
+      [] ->
+        []
+
+      identities ->
+        base_filter = Ash.Resource.Info.base_filter(resource)
+
+        if base_filter && !AshSqlite.DataLayer.Info.base_filter_sql(resource) do
+          raise """
+          Cannot create a unique index for a resource with a base filter without also configuring `base_filter_sql`.
+
+          You must provide the `base_filter_sql` option, or skip unique indexes with `skip_unique_indexes`
+          """
+        end
+
+        identities
+    end
+    |> Enum.reject(fn identity ->
+      identity.name in AshSqlite.DataLayer.Info.skip_unique_indexes(resource)
+    end)
+    |> Enum.filter(fn identity ->
+      Enum.all?(identity.keys, fn key ->
+        Ash.Resource.Info.attribute(resource, key)
+      end)
+    end)
+    |> Enum.sort_by(& &1.name)
+    |> Enum.map(&Map.take(&1, [:name, :keys]))
+    |> Enum.map(fn %{keys: keys} = identity ->
+      %{
+        identity
+        | keys:
+            Enum.map(keys, fn key ->
+              attribute = Ash.Resource.Info.attribute(resource, key)
+              attribute.source || attribute.name
+            end)
+      }
+    end)
+    |> Enum.map(fn identity ->
+      Map.put(
+        identity,
+        :index_name,
+        identity_index_names[identity.name] ||
+          "#{AshSqlite.DataLayer.Info.table(resource)}_#{identity.name}_index"
+      )
+    end)
+    |> Enum.map(&Map.put(&1, :base_filter, AshSqlite.DataLayer.Info.base_filter_sql(resource)))
+  end
+
+  defp default(%{name: name, default: default}, resource, _repo) when is_function(default) do
+    configured_default(resource, name) || "nil"
+  end
+
+  defp default(%{name: name, default: {_, _, _}}, resource, _),
+    do: configured_default(resource, name) || "nil"
+
+  defp default(%{name: name, default: nil}, resource, _),
+    do: configured_default(resource, name) || "nil"
+
+  defp default(%{name: name, default: []}, resource, _),
+    do: configured_default(resource, name) || "[]"
+
+  defp default(%{name: name, default: default}, resource, _) when default == %{},
+    do: configured_default(resource, name) || "%{}"
+
+  defp default(%{name: name, default: value, type: type} = attr, resource, _) do
+    case configured_default(resource, name) do
+      nil ->
+        case migration_default(type, Map.get(attr, :constraints, []), value) do
+          {:ok, default} ->
+            default
+
+          :error ->
+            "nil"
+        end
+
+      default ->
+        default
+    end
+  end
+
+  defp migration_default(type, constraints, value) do
+    type =
+      type
+      |> unwrap_type()
+      |> Ash.Type.get_type()
+
+    if function_exported?(type, :value_to_sqlite_default, 3) do
+      type.value_to_sqlite_default(type, constraints, value)
+    else
+      :error
+    end
+ end + + defp unwrap_type({:array, type}), do: unwrap_type(type) + defp unwrap_type(type), do: type + + defp configured_default(resource, attribute) do + AshSqlite.DataLayer.Info.migration_defaults(resource)[attribute] + end + + defp snapshot_to_binary(snapshot) do + snapshot + |> Map.update!(:attributes, fn attributes -> + Enum.map(attributes, fn attribute -> + %{attribute | type: sanitize_type(attribute.type, attribute[:size])} + end) + end) + |> Jason.encode!(pretty: true) + end + + defp sanitize_type({:array, type}, size) do + ["array", sanitize_type(type, size)] + end + + defp sanitize_type(:varchar, size) when not is_nil(size) do + ["varchar", size] + end + + defp sanitize_type(:binary, size) when not is_nil(size) do + ["binary", size] + end + + defp sanitize_type(type, size) when is_atom(type) and is_integer(size) do + [sanitize_type(type, nil), size] + end + + defp sanitize_type(type, _) do + type + end + + defp load_snapshot(json) do + json + |> Jason.decode!(keys: :atoms!) + |> sanitize_snapshot() + end + + defp sanitize_snapshot(snapshot) do + snapshot + |> Map.put_new(:has_create_action, true) + |> Map.update!(:identities, fn identities -> + Enum.map(identities, &load_identity(&1, snapshot.table)) + end) + |> Map.update!(:attributes, fn attributes -> + Enum.map(attributes, fn attribute -> + attribute = load_attribute(attribute, snapshot.table) + + if is_map(Map.get(attribute, :references)) do + %{ + attribute + | references: rewrite(attribute.references, :ignore, :ignore?) + } + else + attribute + end + end) + end) + |> Map.put_new(:custom_indexes, []) + |> Map.update!(:custom_indexes, &load_custom_indexes/1) + |> Map.put_new(:custom_statements, []) + |> Map.update!(:custom_statements, &load_custom_statements/1) + |> Map.update!(:repo, &String.to_atom/1) + |> Map.put_new(:multitenancy, %{ + attribute: nil, + strategy: nil, + global: nil + }) + |> Map.update!(:multitenancy, &load_multitenancy/1) + |> Map.put_new(:base_filter, nil) + end + + defp load_custom_indexes(custom_indexes) do + Enum.map(custom_indexes || [], fn custom_index -> + custom_index + |> Map.put_new(:fields, []) + |> Map.put_new(:include, []) + |> Map.put_new(:message, nil) + end) + end + + defp load_custom_statements(statements) do + Enum.map(statements || [], fn statement -> + Map.update!(statement, :name, &String.to_atom/1) + end) + end + + defp load_multitenancy(multitenancy) do + multitenancy + |> Map.update!(:strategy, fn strategy -> strategy && String.to_atom(strategy) end) + |> Map.update!(:attribute, fn attribute -> attribute && String.to_atom(attribute) end) + end + + defp load_attribute(attribute, table) do + type = load_type(attribute.type) + + {type, size} = + case type do + {:varchar, size} -> + {:varchar, size} + + {:binary, size} -> + {:binary, size} + + {other, size} when is_atom(other) and is_integer(size) -> + {other, size} + + other -> + {other, nil} + end + + attribute = + if Map.has_key?(attribute, :name) do + Map.put(attribute, :source, String.to_atom(attribute.name)) + else + Map.update!(attribute, :source, &String.to_atom/1) + end + + attribute + |> Map.put(:type, type) + |> Map.put(:size, size) + |> Map.put_new(:default, "nil") + |> Map.update!(:default, &(&1 || "nil")) + |> Map.update!(:references, fn + nil -> + nil + + references -> + references + |> rewrite( + destination_field: :destination_attribute, + destination_field_default: :destination_attribute_default, + destination_field_generated: :destination_attribute_generated + ) + |> Map.delete(:ignore) + |> rewrite(:ignore?, 
:ignore) + |> Map.update!(:destination_attribute, &String.to_atom/1) + |> Map.put_new(:deferrable, false) + |> Map.update!(:deferrable, fn + "initially" -> :initially + other -> other + end) + |> Map.put_new(:destination_attribute_default, "nil") + |> Map.put_new(:destination_attribute_generated, false) + |> Map.put_new(:on_delete, nil) + |> Map.put_new(:on_update, nil) + |> Map.update!(:on_delete, &(&1 && String.to_atom(&1))) + |> Map.update!(:on_update, &(&1 && String.to_atom(&1))) + |> Map.put( + :name, + Map.get(references, :name) || "#{table}_#{attribute.source}_fkey" + ) + |> Map.put_new(:multitenancy, %{ + attribute: nil, + strategy: nil, + global: nil + }) + |> Map.update!(:multitenancy, &load_multitenancy/1) + |> sanitize_name(table) + end) + end + + defp rewrite(map, keys) do + Enum.reduce(keys, map, fn {key, to}, map -> + rewrite(map, key, to) + end) + end + + defp rewrite(map, key, to) do + if Map.has_key?(map, key) do + map + |> Map.put(to, Map.get(map, key)) + |> Map.delete(key) + else + map + end + end + + defp sanitize_name(reference, table) do + if String.starts_with?(reference.name, "_") do + Map.put(reference, :name, "#{table}#{reference.name}") + else + reference + end + end + + defp load_type(["array", type]) do + {:array, load_type(type)} + end + + defp load_type(["varchar", size]) do + {:varchar, size} + end + + defp load_type(["binary", size]) do + {:binary, size} + end + + defp load_type([string, size]) when is_binary(string) and is_integer(size) do + {String.to_existing_atom(string), size} + end + + defp load_type(type) do + String.to_atom(type) + end + + defp load_identity(identity, table) do + identity + |> Map.update!(:name, &String.to_atom/1) + |> Map.update!(:keys, fn keys -> + keys + |> Enum.map(&String.to_atom/1) + |> Enum.sort() + end) + |> add_index_name(table) + |> Map.put_new(:base_filter, nil) + end + + defp add_index_name(%{name: name} = index, table) do + Map.put_new(index, :index_name, "#{table}_#{name}_unique_index") + end +end diff --git a/lib/migration_generator/operation.ex b/lib/migration_generator/operation.ex new file mode 100644 index 0000000..54d2a8b --- /dev/null +++ b/lib/migration_generator/operation.ex @@ -0,0 +1,784 @@ +defmodule AshSqlite.MigrationGenerator.Operation do + @moduledoc false + + defmodule Helper do + @moduledoc false + def join(list), + do: + list + |> List.flatten() + |> Enum.reject(&is_nil/1) + |> Enum.join(", ") + |> String.replace(", )", ")") + + def maybe_add_default("nil"), do: nil + def maybe_add_default(value), do: "default: #{value}" + + def maybe_add_primary_key(true), do: "primary_key: true" + def maybe_add_primary_key(_), do: nil + + def maybe_add_null(false), do: "null: false" + def maybe_add_null(_), do: nil + + def in_quotes(nil), do: nil + def in_quotes(value), do: "\"#{value}\"" + + def as_atom(value) when is_atom(value), do: Macro.inspect_atom(:remote_call, value) + # sobelow_skip ["DOS.StringToAtom"] + def as_atom(value), do: Macro.inspect_atom(:remote_call, String.to_atom(value)) + + def option(key, value) do + if value do + "#{as_atom(key)}: #{inspect(value)}" + end + end + + def on_delete(%{on_delete: on_delete}) when on_delete in [:delete, :nilify] do + "on_delete: :#{on_delete}_all" + end + + def on_delete(%{on_delete: on_delete}) when is_atom(on_delete) and not is_nil(on_delete) do + "on_delete: :#{on_delete}" + end + + def on_delete(_), do: nil + + def on_update(%{on_update: on_update}) when on_update in [:update, :nilify] do + "on_update: :#{on_update}_all" + end + + def on_update(%{on_update: 
on_update}) when is_atom(on_update) and not is_nil(on_update) do + "on_update: :#{on_update}" + end + + def on_update(_), do: nil + + def reference_type( + %{type: :integer}, + %{destination_attribute_generated: true, destination_attribute_default: "nil"} + ) do + :bigint + end + + def reference_type(%{type: type}, _) do + type + end + end + + defmodule CreateTable do + @moduledoc false + defstruct [:table, :multitenancy, :old_multitenancy] + end + + defmodule AddAttribute do + @moduledoc false + defstruct [:attribute, :table, :multitenancy, :old_multitenancy] + + import Helper + + def up(%{ + multitenancy: %{strategy: :attribute, attribute: source_attribute}, + attribute: + %{ + references: + %{ + table: table, + destination_attribute: reference_attribute, + multitenancy: %{strategy: :attribute, attribute: destination_attribute} + } = reference + } = attribute + }) do + with_match = + if destination_attribute != reference_attribute do + "with: [#{as_atom(source_attribute)}: :#{as_atom(destination_attribute)}], match: :full" + end + + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + [ + "add #{inspect(attribute.source)}", + "references(:#{as_atom(table)}", + [ + "column: #{inspect(reference_attribute)}", + with_match, + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + on_delete(reference), + on_update(reference), + size + ], + ")", + maybe_add_default(attribute.default), + maybe_add_primary_key(attribute.primary_key?), + maybe_add_null(attribute.allow_nil?) + ] + |> join() + end + + def up(%{ + attribute: + %{ + references: + %{ + table: table, + destination_attribute: destination_attribute + } = reference + } = attribute + }) do + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + [ + "add #{inspect(attribute.source)}", + "references(:#{as_atom(table)}", + [ + "column: #{inspect(destination_attribute)}", + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + size, + on_delete(reference), + on_update(reference) + ], + ")", + maybe_add_default(attribute.default), + maybe_add_primary_key(attribute.primary_key?), + maybe_add_null(attribute.allow_nil?) + ] + |> join() + end + + def up(%{attribute: %{type: :bigint, default: "nil", generated?: true} = attribute}) do + [ + "add #{inspect(attribute.source)}", + ":bigserial", + maybe_add_null(attribute.allow_nil?), + maybe_add_primary_key(attribute.primary_key?) + ] + |> join() + end + + def up(%{attribute: %{type: :integer, default: "nil", generated?: true} = attribute}) do + [ + "add #{inspect(attribute.source)}", + ":serial", + maybe_add_null(attribute.allow_nil?), + maybe_add_primary_key(attribute.primary_key?) + ] + |> join() + end + + def up(%{attribute: attribute}) do + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + [ + "add #{inspect(attribute.source)}", + "#{inspect(attribute.type)}", + maybe_add_null(attribute.allow_nil?), + maybe_add_default(attribute.default), + size, + maybe_add_primary_key(attribute.primary_key?) 
+ ] + |> join() + end + + def down( + %{ + attribute: attribute, + table: table, + multitenancy: multitenancy + } = op + ) do + AshSqlite.MigrationGenerator.Operation.RemoveAttribute.up(%{ + op + | attribute: attribute, + table: table, + multitenancy: multitenancy + }) + end + end + + defmodule AlterDeferrability do + @moduledoc false + defstruct [:table, :references, :direction, no_phase: true] + + def up(%{direction: :up, table: table, references: %{name: name, deferrable: true}}) do + "execute(\"ALTER TABLE #{table} alter CONSTRAINT #{name} DEFERRABLE INITIALLY IMMEDIATE\");" + end + + def up(%{direction: :up, table: table, references: %{name: name, deferrable: :initially}}) do + "execute(\"ALTER TABLE #{table} alter CONSTRAINT #{name} DEFERRABLE INITIALLY DEFERRED\");" + end + + def up(%{direction: :up, table: table, references: %{name: name}}) do + "execute(\"ALTER TABLE #{table} alter CONSTRAINT #{name} NOT DEFERRABLE\");" + end + + def up(_), do: "" + + def down(%{direction: :down} = data), do: up(%{data | direction: :up}) + def down(_), do: "" + end + + defmodule AlterAttribute do + @moduledoc false + defstruct [ + :old_attribute, + :new_attribute, + :table, + :multitenancy, + :old_multitenancy + ] + + import Helper + + defp alter_opts(attribute, old_attribute) do + primary_key = + cond do + attribute.primary_key? and !old_attribute.primary_key? -> + ", primary_key: true" + + old_attribute.primary_key? and !attribute.primary_key? -> + ", primary_key: false" + + true -> + nil + end + + default = + if attribute.default != old_attribute.default do + if is_nil(attribute.default) do + ", default: nil" + else + ", default: #{attribute.default}" + end + end + + null = + if attribute.allow_nil? != old_attribute.allow_nil? do + ", null: #{attribute.allow_nil?}" + end + + "#{null}#{default}#{primary_key}" + end + + def up(%{ + multitenancy: multitenancy, + old_attribute: old_attribute, + new_attribute: attribute + }) do + type_or_reference = + if AshSqlite.MigrationGenerator.has_reference?(multitenancy, attribute) and + Map.get(old_attribute, :references) != Map.get(attribute, :references) do + reference(multitenancy, attribute) + else + inspect(attribute.type) + end + + "modify #{inspect(attribute.source)}, #{type_or_reference}#{alter_opts(attribute, old_attribute)}" + end + + defp reference( + %{strategy: :attribute, attribute: source_attribute}, + %{ + references: + %{ + multitenancy: %{strategy: :attribute, attribute: destination_attribute}, + table: table, + destination_attribute: reference_attribute + } = reference + } = attribute + ) do + with_match = + if destination_attribute != reference_attribute do + "with: [#{as_atom(source_attribute)}: :#{as_atom(destination_attribute)}], match: :full" + end + + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + join([ + "references(:#{as_atom(table)}, column: #{inspect(reference_attribute)}", + with_match, + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + size, + on_delete(reference), + on_update(reference), + ")" + ]) + end + + defp reference( + _, + %{ + references: + %{ + table: table, + destination_attribute: destination_attribute + } = reference + } = attribute + ) do + size = + if attribute[:size] do + "size: #{attribute[:size]}" + end + + join([ + "references(:#{as_atom(table)}, column: #{inspect(destination_attribute)}", + "name: #{inspect(reference.name)}", + "type: #{inspect(reference_type(attribute, reference))}", + size, + on_delete(reference), + 
on_update(reference), + ")" + ]) + end + + def down(op) do + up(%{ + op + | old_attribute: op.new_attribute, + new_attribute: op.old_attribute, + old_multitenancy: op.multitenancy, + multitenancy: op.old_multitenancy + }) + end + end + + defmodule DropForeignKey do + @moduledoc false + # We only run this migration in one direction, based on the input + # This is because the creation of a foreign key is handled by `references/3` + # We only need to drop it before altering an attribute with `references/3` + defstruct [:attribute, :table, :multitenancy, :direction, no_phase: true] + + import Helper + + def up(%{table: table, attribute: %{references: reference}, direction: :up}) do + "drop constraint(:#{as_atom(table)}, #{join([inspect(reference.name)])})" + end + + def up(_) do + "" + end + + def down(%{ + table: table, + attribute: %{references: reference}, + direction: :down + }) do + "drop constraint(:#{as_atom(table)}, #{join([inspect(reference.name)])})" + end + + def down(_) do + "" + end + end + + defmodule RenameAttribute do + @moduledoc false + defstruct [ + :old_attribute, + :new_attribute, + :table, + :multitenancy, + :old_multitenancy, + no_phase: true + ] + + import Helper + + def up(%{ + old_attribute: old_attribute, + new_attribute: new_attribute, + table: table + }) do + table_statement = join([":#{as_atom(table)}"]) + + "rename table(#{table_statement}), #{inspect(old_attribute.source)}, to: #{inspect(new_attribute.source)}" + end + + def down( + %{ + old_attribute: old_attribute, + new_attribute: new_attribute + } = data + ) do + up(%{data | new_attribute: old_attribute, old_attribute: new_attribute}) + end + end + + defmodule RemoveAttribute do + @moduledoc false + defstruct [:attribute, :table, :multitenancy, :old_multitenancy, commented?: true] + + def up(%{attribute: attribute, commented?: true}) do + """ + # Attribute removal has been commented out to avoid data loss. 
See the migration generator documentation for more + # If you uncomment this, be sure to also uncomment the corresponding attribute *addition* in the `down` migration + # remove #{inspect(attribute.source)} + """ + end + + def up(%{attribute: attribute}) do + "remove #{inspect(attribute.source)}" + end + + def down(%{attribute: attribute, multitenancy: multitenancy, commented?: true}) do + prefix = """ + # This is the `down` migration of the statement: + # + # remove #{inspect(attribute.source)} + # + """ + + contents = + %AshSqlite.MigrationGenerator.Operation.AddAttribute{ + attribute: attribute, + multitenancy: multitenancy + } + |> AshSqlite.MigrationGenerator.Operation.AddAttribute.up() + |> String.split("\n") + |> Enum.map_join("\n", &"# #{&1}") + + prefix <> "\n" <> contents + end + + def down(%{attribute: attribute, multitenancy: multitenancy, table: table}) do + AshSqlite.MigrationGenerator.Operation.AddAttribute.up( + %AshSqlite.MigrationGenerator.Operation.AddAttribute{ + attribute: attribute, + table: table, + multitenancy: multitenancy + } + ) + end + end + + defmodule AddUniqueIndex do + @moduledoc false + defstruct [:identity, :table, :multitenancy, :old_multitenancy, no_phase: true] + + import Helper + + def up(%{ + identity: %{name: name, keys: keys, base_filter: base_filter, index_name: index_name}, + table: table, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + if base_filter do + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], where: \"#{base_filter}\", #{join(["name: \"#{index_name}\""])})" + else + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\""])})" + end + end + + def down(%{ + identity: %{name: name, keys: keys, index_name: index_name}, + table: table, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + "drop_if_exists unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\""])})" + end + end + + defmodule AddCustomStatement do + @moduledoc false + defstruct [:statement, :table, no_phase: true] + + def up(%{statement: %{up: up, code?: false}}) do + """ + execute(\"\"\" + #{String.trim(up)} + \"\"\") + """ + end + + def up(%{statement: %{up: up, code?: true}}) do + up + end + + def down(%{statement: %{down: down, code?: false}}) do + """ + execute(\"\"\" + #{String.trim(down)} + \"\"\") + """ + end + + def down(%{statement: %{down: down, code?: true}}) do + down + end + end + + defmodule RemoveCustomStatement do + @moduledoc false + defstruct [:statement, :table, no_phase: true] + + def up(%{statement: statement, table: table}) do + AddCustomStatement.down(%AddCustomStatement{statement: statement, table: table}) + end + + def down(%{statement: statement, table: table}) do + AddCustomStatement.up(%AddCustomStatement{statement: statement, table: table}) + end + end + + defmodule AddCustomIndex do + @moduledoc false + defstruct [:table, :index, :base_filter, :multitenancy, no_phase: true] + import Helper + + def up(%{ + index: index, + table: table, + base_filter: base_filter, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + 
[to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + index = + if index.where && base_filter do + %{index | where: base_filter <> " AND " <> index.where} + else + index + end + + opts = + join([ + option(:name, index.name), + option(:unique, index.unique), + option(:using, index.using), + option(:where, index.where), + option(:include, index.include) + ]) + + if opts == "", + do: "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}])", + else: + "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{opts})" + end + + def down(%{index: index, table: table, multitenancy: multitenancy}) do + index_name = AshSqlite.CustomIndex.name(table, index) + + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + "drop_if_exists index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\""])})" + end + end + + defmodule RemovePrimaryKey do + @moduledoc false + defstruct [:table, no_phase: true] + + def up(%{table: table}) do + "drop constraint(#{inspect(table)}, \"#{table}_pkey\")" + end + + def down(_) do + "" + end + end + + defmodule RemovePrimaryKeyDown do + @moduledoc false + defstruct [:table, no_phase: true] + + def up(_) do + "" + end + + def down(%{table: table}) do + "drop constraint(#{inspect(table)}, \"#{table}_pkey\")" + end + end + + defmodule RemoveCustomIndex do + @moduledoc false + defstruct [:table, :index, :base_filter, :multitenancy, no_phase: true] + import Helper + + def up(%{index: index, table: table, multitenancy: multitenancy}) do + index_name = AshSqlite.CustomIndex.name(table, index) + + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + "drop_if_exists index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\""])})" + end + + def down(%{ + index: index, + table: table, + base_filter: base_filter, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [to_string(multitenancy.attribute) | Enum.map(index.fields, &to_string/1)] + + _ -> + Enum.map(index.fields, &to_string/1) + end + + index = + if index.where && base_filter do + %{index | where: base_filter <> " AND " <> index.where} + else + index + end + + opts = + join([ + option(:name, index.name), + option(:unique, index.unique), + option(:using, index.using), + option(:where, index.where), + option(:include, index.include) + ]) + + if opts == "" do + "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}])" + else + "create index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{opts})" + end + end + end + + defmodule RenameUniqueIndex do + @moduledoc false + defstruct [ + :new_identity, + :old_identity, + :table, + :multitenancy, + :old_multitenancy, + no_phase: true + ] + + def up(%{ + old_identity: %{index_name: old_index_name, name: old_name}, + new_identity: %{index_name: new_index_name}, + table: table + }) do + old_index_name = old_index_name || "#{table}_#{old_name}_index" + + "execute(\"ALTER INDEX #{old_index_name} " <> + "RENAME TO #{new_index_name}\")\n" + end + + def down(%{ + old_identity: %{index_name: old_index_name, name: old_name}, + new_identity: 
%{index_name: new_index_name}, + table: table + }) do + old_index_name = old_index_name || "#{table}_#{old_name}_index" + + "execute(\"ALTER INDEX #{new_index_name} " <> + "RENAME TO #{old_index_name}\")\n" + end + end + + defmodule RemoveUniqueIndex do + @moduledoc false + defstruct [:identity, :table, :multitenancy, :old_multitenancy, no_phase: true] + + import Helper + + def up(%{ + identity: %{name: name, keys: keys, index_name: index_name}, + table: table, + old_multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + "drop_if_exists unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\""])})" + end + + def down(%{ + identity: %{name: name, keys: keys, base_filter: base_filter, index_name: index_name}, + table: table, + multitenancy: multitenancy + }) do + keys = + case multitenancy.strategy do + :attribute -> + [multitenancy.attribute | keys] + + _ -> + keys + end + + index_name = index_name || "#{table}_#{name}_index" + + if base_filter do + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], where: \"#{base_filter}\", #{join(["name: \"#{index_name}\""])})" + else + "create unique_index(:#{as_atom(table)}, [#{Enum.map_join(keys, ", ", &inspect/1)}], #{join(["name: \"#{index_name}\""])})" + end + end + end +end diff --git a/lib/migration_generator/phase.ex b/lib/migration_generator/phase.ex new file mode 100644 index 0000000..1ed4f3e --- /dev/null +++ b/lib/migration_generator/phase.ex @@ -0,0 +1,66 @@ +defmodule AshSqlite.MigrationGenerator.Phase do + @moduledoc false + + defmodule Create do + @moduledoc false + defstruct [:table, :multitenancy, operations: [], commented?: false] + + import AshSqlite.MigrationGenerator.Operation.Helper, only: [as_atom: 1] + + def up(%{table: table, operations: operations}) do + opts = "" + + "create table(:#{as_atom(table)}, primary_key: false#{opts}) do\n" <> + Enum.map_join(operations, "\n", fn operation -> operation.__struct__.up(operation) end) <> + "\nend" + end + + def down(%{table: table}) do + opts = "" + + "drop table(:#{as_atom(table)}#{opts})" + end + end + + defmodule Alter do + @moduledoc false + defstruct [:table, :multitenancy, operations: [], commented?: false] + + import AshSqlite.MigrationGenerator.Operation.Helper, only: [as_atom: 1] + + def up(%{table: table, operations: operations}) do + body = + operations + |> Enum.map_join("\n", fn operation -> operation.__struct__.up(operation) end) + |> String.trim() + + if body == "" do + "" + else + opts = "" + + "alter table(:#{as_atom(table)}#{opts}) do\n" <> + body <> + "\nend" + end + end + + def down(%{table: table, operations: operations}) do + body = + operations + |> Enum.reverse() + |> Enum.map_join("\n", fn operation -> operation.__struct__.down(operation) end) + |> String.trim() + + if body == "" do + "" + else + opts = "" + + "alter table(:#{as_atom(table)}#{opts}) do\n" <> + body <> + "\nend" + end + end + end +end diff --git a/lib/mix/helpers.ex b/lib/mix/helpers.ex new file mode 100644 index 0000000..8b8470f --- /dev/null +++ b/lib/mix/helpers.ex @@ -0,0 +1,132 @@ +defmodule AshSqlite.Mix.Helpers do + @moduledoc false + def domains!(opts, args) do + apps = + if apps_paths = Mix.Project.apps_paths() do + apps_paths |> Map.keys() |> Enum.sort() + else + [Mix.Project.config()[:app]] + end + + configured_domains = Enum.flat_map(apps, 
&Application.get_env(&1, :ash_domains, []))
+
+    domains =
+      if opts[:domains] && opts[:domains] != "" do
+        opts[:domains]
+        |> Kernel.||("")
+        |> String.split(",")
+        |> Enum.flat_map(fn
+          "" ->
+            []
+
+          domain ->
+            [Module.concat([domain])]
+        end)
+      else
+        configured_domains
+      end
+
+    domains
+    |> Enum.map(&ensure_compiled(&1, args))
+    |> case do
+      [] ->
+        raise "must supply the --domains argument, or set `config :my_app, ash_domains: [...]` in config"
+
+      domains ->
+        domains
+    end
+  end
+
+  def repos!(opts, args) do
+    domains = domains!(opts, args)
+
+    resources =
+      domains
+      |> Enum.flat_map(&Ash.Domain.Info.resources/1)
+      |> Enum.filter(&(Ash.DataLayer.data_layer(&1) == AshSqlite.DataLayer))
+      |> case do
+        [] ->
+          raise """
+          No resources with `data_layer: AshSqlite.DataLayer` found in the domains #{Enum.map_join(domains, ",", &inspect/1)}.
+
+          Must be able to find at least one resource with `data_layer: AshSqlite.DataLayer`.
+          """
+
+        resources ->
+          resources
+      end
+
+    resources
+    |> Enum.map(&AshSqlite.DataLayer.Info.repo(&1))
+    |> Enum.uniq()
+    |> case do
+      [] ->
+        raise """
+        No repos could be found configured on the resources in the domains: #{Enum.map_join(domains, ",", &inspect/1)}
+
+        At least one resource must have a repo configured.
+
+        The following resources were found with `data_layer: AshSqlite.DataLayer`:
+
+        #{Enum.map_join(resources, "\n", &"* #{inspect(&1)}")}
+        """
+
+      repos ->
+        repos
+    end
+  end
+
+  def delete_flag(args, arg) do
+    case Enum.split_while(args, &(&1 != arg)) do
+      {left, [_ | rest]} ->
+        left ++ rest
+
+      _ ->
+        args
+    end
+  end
+
+  def delete_arg(args, arg) do
+    case Enum.split_while(args, &(&1 != arg)) do
+      {left, [_, _ | rest]} ->
+        left ++ rest
+
+      _ ->
+        args
+    end
+  end
+
+  defp ensure_compiled(domain, args) do
+    if Code.ensure_loaded?(Mix.Tasks.App.Config) do
+      Mix.Task.run("app.config", args)
+    else
+      Mix.Task.run("loadpaths", args)
+      "--no-compile" not in args && Mix.Task.run("compile", args)
+    end
+
+    case Code.ensure_compiled(domain) do
+      {:module, _} ->
+        domain
+        |> Ash.Domain.Info.resources()
+        |> Enum.each(&Code.ensure_compiled/1)
+
+        # TODO: We shouldn't need to make sure that the resources are compiled
+
+        domain
+
+      {:error, error} ->
+        Mix.raise("Could not load #{inspect(domain)}, error: #{inspect(error)}. ")
+    end
+  end
+
+  def migrations_path(opts, repo) do
+    opts[:migrations_path] || repo.config()[:migrations_path] || derive_migrations_path(repo)
+  end
+
+  def derive_migrations_path(repo) do
+    config = repo.config()
+    priv = config[:priv] || "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}"
+    app = Keyword.fetch!(config, :otp_app)
+    Application.app_dir(app, Path.join(priv, "migrations"))
+  end
+end
diff --git a/lib/mix/tasks/ash_sqlite.create.ex b/lib/mix/tasks/ash_sqlite.create.ex
new file mode 100644
index 0000000..a5da23e
--- /dev/null
+++ b/lib/mix/tasks/ash_sqlite.create.ex
@@ -0,0 +1,50 @@
+defmodule Mix.Tasks.AshSqlite.Create do
+  use Mix.Task
+
+  @shortdoc "Creates the repository storage"
+
+  @switches [
+    quiet: :boolean,
+    domains: :string,
+    no_compile: :boolean,
+    no_deps_check: :boolean
+  ]
+
+  @aliases [
+    q: :quiet
+  ]
+
+  @moduledoc """
+  Create the storage for the repos of all resources in the given (or configured) domains.
+
+  ## Examples
+
+      mix ash_sqlite.create
+      mix ash_sqlite.create --domains MyApp.Domain1,MyApp.Domain2
+
+  ## Command line options
+
+  * `--domains` - the domains whose repos you want to create.
+ * `--quiet` - do not log output + * `--no-compile` - do not compile before creating + * `--no-deps-check` - do not check dependencies before creating + """ + + @doc false + def run(args) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + repos = AshSqlite.Mix.Helpers.repos!(opts, args) + + repo_args = + Enum.flat_map(repos, fn repo -> + ["-r", to_string(repo)] + end) + + rest_opts = AshSqlite.Mix.Helpers.delete_arg(args, "--domains") + + Mix.Task.reenable("ecto.create") + + Mix.Task.run("ecto.create", repo_args ++ rest_opts) + end +end diff --git a/lib/mix/tasks/ash_sqlite.drop.ex b/lib/mix/tasks/ash_sqlite.drop.ex new file mode 100644 index 0000000..6dbc968 --- /dev/null +++ b/lib/mix/tasks/ash_sqlite.drop.ex @@ -0,0 +1,58 @@ +defmodule Mix.Tasks.AshSqlite.Drop do + use Mix.Task + + @shortdoc "Drops the repository storage for the repos in the specified (or configured) domains" + @default_opts [force: false, force_drop: false] + + @aliases [ + f: :force, + q: :quiet + ] + + @switches [ + force: :boolean, + force_drop: :boolean, + quiet: :boolean, + domains: :string, + no_compile: :boolean, + no_deps_check: :boolean + ] + + @moduledoc """ + Drop the storage for the given repository. + + ## Examples + + mix ash_sqlite.drop + mix ash_sqlite.drop --domains MyApp.Domain1,MyApp.Domain2 + + ## Command line options + + * `--domains` - the domains whose repos should be dropped + * `-q`, `--quiet` - run the command quietly + * `-f`, `--force` - do not ask for confirmation when dropping the database. + Confirmation is asked only when `:start_permanent` is set to true + (typically in production) + * `--no-compile` - do not compile before dropping + * `--no-deps-check` - do not check dependencies before dropping + """ + + @doc false + def run(args) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + opts = Keyword.merge(@default_opts, opts) + + repos = AshSqlite.Mix.Helpers.repos!(opts, args) + + repo_args = + Enum.flat_map(repos, fn repo -> + ["-r", to_string(repo)] + end) + + rest_opts = AshSqlite.Mix.Helpers.delete_arg(args, "--domains") + + Mix.Task.reenable("ecto.drop") + + Mix.Task.run("ecto.drop", repo_args ++ rest_opts) + end +end diff --git a/lib/mix/tasks/ash_sqlite.generate_migrations.ex b/lib/mix/tasks/ash_sqlite.generate_migrations.ex new file mode 100644 index 0000000..02d70d8 --- /dev/null +++ b/lib/mix/tasks/ash_sqlite.generate_migrations.ex @@ -0,0 +1,95 @@ +defmodule Mix.Tasks.AshSqlite.GenerateMigrations do + @moduledoc """ + Generates migrations, and stores a snapshot of your resources. + + Options: + + * `domains` - a comma-separated list of domain modules for which migrations will be generated + * `snapshot-path` - a custom path to store the snapshots, defaults to "priv/resource_snapshots" + * `migration-path` - a custom path to store the migrations, defaults to "priv". + Migrations are stored in a folder for each repo, so `priv/repo_name/migrations` + * `drop-columns` - whether or not to drop columns as attributes are removed. See below for more + * `name` - + names the generated migrations, prepending with the timestamp. The default is `migrate_resources_<n>`, + where `<n>` is the count of migrations matching `*migrate_resources*` plus one.
+ For example, `--name add_special_column` would get a name like `20210708181402_add_special_column.exs` + + Flags: + + * `quiet` - messages for file creations will not be printed + * `no-format` - files that are created will not be formatted with the code formatter + * `dry-run` - no files are created, instead the new migration is printed + * `check` - no files are created, returns an exit(1) code if the current snapshots and resources don't match + + #### Snapshots + + Snapshots are stored in a folder for each table that migrations are generated for. Each snapshot is + stored in a file with a timestamp of when it was generated. + This is important because it allows for simultaneous work to be done on separate branches, and for rolling back + changes more easily, e.g. removing a generated migration and deleting the most recent snapshot, without having to redo + all of it. + + #### Dropping columns + + Generally speaking, it is bad practice to drop columns when you deploy a change that + would remove an attribute. The main reasons for this are backwards compatibility and rolling restarts. + If you deploy an attribute removal and run migrations, then regardless of your deployment strategy, you + won't be able to roll back, because the data has been deleted. In a rolling restart situation, some of + the machines/pods/whatever may still be running after the column has been deleted, causing errors. With + this in mind, it's best not to delete those columns until later, after the data has been confirmed unnecessary. + To that end, the migration generator leaves the column dropping code commented. You can pass `--drop-columns` + to tell it to uncomment those statements. Additionally, you can just uncomment that code on a case-by-case + basis. + + #### Conflicts/Multiple Resources + + The migration generator will raise on conflicts that it can't resolve, like the same field with different + types. It will prompt to resolve conflicts that can be resolved with human input. + For example, if you remove an attribute and add an attribute, it will ask you if you are renaming + the column in question. If not, it will remove one column and add the other. + + Additionally, it lowers things to the database where possible: + + #### Defaults + Some anonymous function defaults will be translated to database-specific defaults. Currently, that includes: + + * `&DateTime.utc_now/0` + + Non-function default values will be dumped to their native type and inspected. This may not work for some types, + and may require manual intervention/patches to the migration generator code. + + #### Identities + + Identities will cause the migration generator to generate unique constraints. If multiple + resources target the same table, you will be asked to select the primary key, and any others + will be added as unique constraints.
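As an illustration (hypothetical output, modeled on the `uniq_code` identity over `code` on the `managers` table found in this repo's test snapshots), an identity becomes a `unique_index` call in the generated migration:

```elixir
# Roughly what the generated migration contains for that identity:
create unique_index(:managers, [:code], name: "managers_uniq_code_index")
```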
+ """ + use Mix.Task + + @shortdoc "Generates migrations, and stores a snapshot of your resources" + def run(args) do + {opts, _} = + OptionParser.parse!(args, + strict: [ + domains: :string, + snapshot_path: :string, + migration_path: :string, + quiet: :boolean, + name: :string, + no_format: :boolean, + dry_run: :boolean, + check: :boolean, + drop_columns: :boolean + ] + ) + + domains = AshSqlite.Mix.Helpers.domains!(opts, args) + + opts = + opts + |> Keyword.put(:format, !opts[:no_format]) + |> Keyword.delete(:no_format) + + AshSqlite.MigrationGenerator.generate(domains, opts) + end +end diff --git a/lib/mix/tasks/ash_sqlite.migrate.ex b/lib/mix/tasks/ash_sqlite.migrate.ex new file mode 100644 index 0000000..8e4f058 --- /dev/null +++ b/lib/mix/tasks/ash_sqlite.migrate.ex @@ -0,0 +1,116 @@ +defmodule Mix.Tasks.AshSqlite.Migrate do + use Mix.Task + + import AshSqlite.Mix.Helpers, + only: [migrations_path: 2] + + @shortdoc "Runs the repository migrations for all repositories in the provided (or congigured) domains" + + @aliases [ + n: :step + ] + + @switches [ + all: :boolean, + step: :integer, + to: :integer, + quiet: :boolean, + pool_size: :integer, + log_sql: :boolean, + strict_version_order: :boolean, + domains: :string, + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep + ] + + @moduledoc """ + Runs the pending migrations for the given repository. + + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application, where "YOUR_REPO" is the last segment + in your repository name. For example, the repository `MyApp.Repo` + will use "priv/repo/migrations". The repository `Whatever.MyRepo` + will use "priv/my_repo/migrations". + + This task runs all pending migrations by default. To migrate up to a + specific version number, supply `--to version_number`. To migrate a + specific number of times, use `--step n`. + + This is only really useful if your domain or domains only use a single repo. + If you have multiple repos and you want to run a single migration and/or + migrate/roll them back to different points, you will need to use the + ecto specific task, `mix ecto.migrate` and provide your repo name. + + If a repository has not yet been started, one will be started outside + your application supervision tree and shutdown afterwards. + + ## Examples + + mix ash_sqlite.migrate + mix ash_sqlite.migrate --domains MyApp.Domain1,MyApp.Domain2 + + mix ash_sqlite.migrate -n 3 + mix ash_sqlite.migrate --step 3 + + mix ash_sqlite.migrate --to 20080906120000 + + ## Command line options + + * `--domains` - the domains who's repos should be migrated + + * `--all` - run all pending migrations + + * `--step`, `-n` - run n number of pending migrations + + * `--to` - run all migrations up to and including version + + * `--quiet` - do not log migration commands + + * `--pool-size` - the pool size if the repository is started only for the task (defaults to 2) + + * `--log-sql` - log the raw sql migrations are running + + * `--strict-version-order` - abort when applying a migration with old timestamp + + * `--no-compile` - does not compile applications before migrating + + * `--no-deps-check` - does not check depedendencies before migrating + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. This option may be given multiple times in which case the migrations + are loaded from all the given directories and sorted as if they were in the same one. + + Note, if you have migrations paths e.g. 
`a/` and `b/`, and run + `mix ecto.migrate --migrations-path a/`, the latest migrations from `a/` will be run (even + if `b/` contains the overall latest migrations). + """ + + @impl true + def run(args) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + repos = AshSqlite.Mix.Helpers.repos!(opts, args) + + repo_args = + Enum.flat_map(repos, fn repo -> + ["-r", to_string(repo)] + end) + + rest_opts = + args + |> AshSqlite.Mix.Helpers.delete_arg("--domains") + |> AshSqlite.Mix.Helpers.delete_arg("--migrations-path") + + Mix.Task.reenable("ecto.migrate") + + for repo <- repos do + Mix.Task.run( + "ecto.migrate", + repo_args ++ rest_opts ++ ["--migrations-path", migrations_path(opts, repo)] + ) + + Mix.Task.reenable("ecto.migrate") + end + end +end diff --git a/lib/mix/tasks/ash_sqlite.rollback.ex b/lib/mix/tasks/ash_sqlite.rollback.ex new file mode 100644 index 0000000..0ae4c19 --- /dev/null +++ b/lib/mix/tasks/ash_sqlite.rollback.ex @@ -0,0 +1,81 @@ +defmodule Mix.Tasks.AshSqlite.Rollback do + use Mix.Task + + import AshSqlite.Mix.Helpers, + only: [migrations_path: 2] + + @shortdoc "Rolls back the repository migrations for all repositories in the provided (or configured) domains" + + @moduledoc """ + Reverts applied migrations in the given repository. + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application but it can be configured by specifying + the `:priv` key under the repository configuration. + Runs the latest applied migration by default. To roll back to + a version number, supply `--to version_number`. To roll back a + specific number of times, use `--step n`. To undo all applied + migrations, provide `--all`. + + This is only really useful if your domain or domains only use a single repo. + If you have multiple repos and you want to run a single migration and/or + migrate/roll them back to different points, you will need to use the + ecto specific task, `mix ecto.rollback` and provide your repo name.
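For instance (hypothetical repo name), you could roll back a single repo directly with the underlying ecto task:

    mix ecto.rollback -r MyApp.Repo --step 1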
+ + ## Examples + mix ash_sqlite.rollback + mix ash_sqlite.rollback -r Custom.Repo + mix ash_sqlite.rollback -n 3 + mix ash_sqlite.rollback --step 3 + mix ash_sqlite.rollback -v 20080906120000 + mix ash_sqlite.rollback --to 20080906120000 + + ## Command line options + * `--domains` - the domains whose repos should be rolled back + * `--all` - revert all applied migrations + * `--step` / `-n` - revert n number of applied migrations + * `--to` / `-v` - revert all migrations down to and including version + * `--quiet` - do not log migration commands + * `--pool-size` - the pool size if the repository is started only for the task (defaults to 1) + * `--log-sql` - log the raw sql migrations are running + """ + + @doc false + def run(args) do + {opts, _, _} = + OptionParser.parse(args, + switches: [ + all: :boolean, + step: :integer, + to: :integer, + start: :boolean, + quiet: :boolean, + pool_size: :integer, + log_sql: :boolean + ], + aliases: [n: :step, v: :to] + ) + + repos = AshSqlite.Mix.Helpers.repos!(opts, args) + + repo_args = + Enum.flat_map(repos, fn repo -> + ["-r", to_string(repo)] + end) + + rest_opts = + args + |> AshSqlite.Mix.Helpers.delete_arg("--domains") + |> AshSqlite.Mix.Helpers.delete_arg("--migrations-path") + + Mix.Task.reenable("ecto.rollback") + + for repo <- repos do + Mix.Task.run( + "ecto.rollback", + repo_args ++ rest_opts ++ ["--migrations-path", migrations_path(opts, repo)] + ) + + Mix.Task.reenable("ecto.rollback") + end + end +end diff --git a/lib/reference.ex b/lib/reference.ex new file mode 100644 index 0000000..275bfd7 --- /dev/null +++ b/lib/reference.ex @@ -0,0 +1,43 @@ +defmodule AshSqlite.Reference do + @moduledoc "Represents the configuration of a reference (i.e. foreign key)." + defstruct [:relationship, :on_delete, :on_update, :name, :deferrable, ignore?: false] + + def schema do + [ + relationship: [ + type: :atom, + required: true, + doc: "The relationship to be configured" + ], + ignore?: [ + type: :boolean, + doc: + "If set to true, no reference is created for the given relationship. This is useful if you need to define it in some custom way" + ], + on_delete: [ + type: {:one_of, [:delete, :nilify, :nothing, :restrict]}, + doc: """ + What should happen to records of this resource when the referenced record of the *destination* resource is deleted. + """ + ], + on_update: [ + type: {:one_of, [:update, :nilify, :nothing, :restrict]}, + doc: """ + What should happen to records of this resource when the referenced destination_attribute of the *destination* record is updated. + """ + ], + deferrable: [ + type: {:one_of, [false, true, :initially]}, + default: false, + doc: """ + Whether or not the constraint is deferrable. This only affects the migration generator. + """ + ], + name: [ + type: :string, + doc: + "The name of the foreign key to generate in the database. Defaults to
__fkey" + ] + ] + end +end diff --git a/lib/repo.ex b/lib/repo.ex new file mode 100644 index 0000000..2bed9af --- /dev/null +++ b/lib/repo.ex @@ -0,0 +1,155 @@ +defmodule AshSqlite.Repo do + @moduledoc """ + Resources that use `AshSqlite.DataLayer` use a `Repo` to access the database. + + This repo is a thin wrapper around an `Ecto.Repo`. + + You can use `Ecto.Repo`'s `init/2` to configure your repo like normal, but + instead of returning `{:ok, config}`, use `super(config)` to pass the + configuration to the `AshSqlite.Repo` implementation. + """ + + @doc "Use this to inform the data layer about what extensions are installed" + @callback installed_extensions() :: [String.t()] + + @doc """ + Use this to inform the data layer about the oldest potential sqlite version it will be run on. + + Must be an integer greater than or equal to 13. + """ + @callback min_pg_version() :: integer() + + @doc "The path where your migrations are stored" + @callback migrations_path() :: String.t() | nil + @doc "Allows overriding a given migration type for *all* fields, for example if you wanted to always use :timestamptz for :utc_datetime fields" + @callback override_migration_type(atom) :: atom + + defmacro __using__(opts) do + quote bind_quoted: [opts: opts] do + otp_app = opts[:otp_app] || raise("Must configure OTP app") + + use Ecto.Repo, + adapter: Ecto.Adapters.MyXQL, + otp_app: otp_app + + @behaviour AshSqlite.Repo + + defoverridable insert: 2, insert: 1, insert!: 2, insert!: 1 + + def installed_extensions, do: [] + def migrations_path, do: nil + def override_migration_type(type), do: type + def min_pg_version, do: 10 + + def init(_, config) do + new_config = + config + |> Keyword.put(:installed_extensions, installed_extensions()) + |> Keyword.put(:migrations_path, migrations_path()) + |> Keyword.put(:case_sensitive_like, :on) + + {:ok, new_config} + end + + def insert(struct_or_changeset, opts \\ []) do + struct_or_changeset + |> to_ecto() + |> then(fn value -> + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert( + __MODULE__, + repo, + value, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts)) + ) + end) + |> from_ecto() + end + + def insert!(struct_or_changeset, opts \\ []) do + struct_or_changeset + |> to_ecto() + |> then(fn value -> + repo = get_dynamic_repo() + + Ecto.Repo.Schema.insert!( + __MODULE__, + repo, + value, + Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts)) + ) + end) + |> from_ecto() + end + + def from_ecto({:ok, result}), do: {:ok, from_ecto(result)} + def from_ecto({:error, _} = other), do: other + + def from_ecto(nil), do: nil + + def from_ecto(value) when is_list(value) do + Enum.map(value, &from_ecto/1) + end + + def from_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + empty = struct(resource) + + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + case Map.get(record, relationship.name) do + %Ecto.Association.NotLoaded{} -> + Map.put(record, relationship.name, Map.get(empty, relationship.name)) + + value -> + Map.put(record, relationship.name, from_ecto(value)) + end + end) + else + record + end + end + + def from_ecto(other), do: other + + def to_ecto(nil), do: nil + + def to_ecto(value) when is_list(value) do + Enum.map(value, &to_ecto/1) + end + + def to_ecto(%resource{} = record) do + if Spark.Dsl.is?(resource, Ash.Resource) do + resource + |> Ash.Resource.Info.relationships() + |> Enum.reduce(record, fn relationship, record -> + value = + case Map.get(record, 
relationship.name) do + %Ash.NotLoaded{} -> + %Ecto.Association.NotLoaded{ + __field__: relationship.name, + __cardinality__: relationship.cardinality + } + + value -> + to_ecto(value) + end + + Map.put(record, relationship.name, value) + end) + else + record + end + end + + def to_ecto(other), do: other + + defoverridable init: 2, + installed_extensions: 0, + override_migration_type: 1, + min_pg_version: 0 + end + end +end diff --git a/lib/sql_implementation.ex b/lib/sql_implementation.ex new file mode 100644 index 0000000..509890e --- /dev/null +++ b/lib/sql_implementation.ex @@ -0,0 +1,449 @@ +defmodule AshSqlite.SqlImplementation do + @moduledoc false + use AshSql.Implementation + + require Ecto.Query + + @impl true + def manual_relationship_function, do: :ash_sqlite_join + + @impl true + def manual_relationship_subquery_function, do: :ash_sqlite_subquery + + @impl true + def strpos_function, do: "instr" + + @impl true + def ilike?, do: false + + @impl true + def expr( + query, + %like{arguments: [arg1, arg2], embedded?: pred_embedded?}, + bindings, + embedded?, + acc, + type + ) + when like in [AshSqlite.Functions.Like, AshSqlite.Functions.ILike] do + {arg1, acc} = + AshSql.Expr.dynamic_expr(query, arg1, bindings, pred_embedded? || embedded?, :string, acc) + + {arg2, acc} = + AshSql.Expr.dynamic_expr(query, arg2, bindings, pred_embedded? || embedded?, :string, acc) + + inner_dyn = + if like == AshSqlite.Functions.Like do + Ecto.Query.dynamic(like(^arg1, ^arg2)) + else + Ecto.Query.dynamic(like(fragment("LOWER(?)", ^arg1), fragment("LOWER(?)", ^arg2))) + end + + if type != Ash.Type.Boolean do + {:ok, inner_dyn, acc} + else + {:ok, Ecto.Query.dynamic(type(^inner_dyn, ^type)), acc} + end + end + + def expr( + query, + %Ash.Query.Function.GetPath{ + arguments: [%Ash.Query.Ref{attribute: %{type: type}}, right] + } = get_path, + bindings, + embedded?, + acc, + nil + ) + when is_atom(type) and is_list(right) do + if Ash.Type.embedded_type?(type) do + type = determine_type_at_path(type, right) + + do_get_path(query, get_path, bindings, embedded?, acc, type) + else + do_get_path(query, get_path, bindings, embedded?, acc) + end + end + + def expr( + query, + %Ash.Query.Function.GetPath{ + arguments: [%Ash.Query.Ref{attribute: %{type: {:array, type}}}, right] + } = get_path, + bindings, + embedded?, + acc, + nil + ) + when is_atom(type) and is_list(right) do + if Ash.Type.embedded_type?(type) do + type = determine_type_at_path(type, right) + do_get_path(query, get_path, bindings, embedded?, acc, type) + else + do_get_path(query, get_path, bindings, embedded?, acc) + end + end + + def expr( + query, + %Ash.Query.Function.GetPath{} = get_path, + bindings, + embedded?, + acc, + type + ) do + do_get_path(query, get_path, bindings, embedded?, acc, type) + end + + @impl true + def expr( + _query, + _expr, + _bindings, + _embedded?, + _acc, + _type + ) do + :error + end + + @impl true + def type_expr(expr, nil), do: expr + + def type_expr(expr, type) when is_atom(type) do + type = Ash.Type.get_type(type) + + cond do + !Ash.Type.ash_type?(type) -> + Ecto.Query.dynamic(type(^expr, ^type)) + + Ash.Type.storage_type(type, []) == :ci_string -> + Ecto.Query.dynamic(fragment("(? COLLATE NOCASE)", ^expr)) + + true -> + Ecto.Query.dynamic(type(^expr, ^Ash.Type.storage_type(type, []))) + end + end + + def type_expr(expr, type) do + case type do + {:parameterized, inner_type, constraints} -> + if inner_type.type(constraints) == :ci_string do + Ecto.Query.dynamic(fragment("(? 
COLLATE NOCASE)", ^expr)) + else + Ecto.Query.dynamic(type(^expr, ^type)) + end + + nil -> + expr + + type -> + Ecto.Query.dynamic(type(^expr, ^type)) + end + end + + @impl true + def table(resource) do + AshSqlite.DataLayer.Info.table(resource) + end + + @impl true + def schema(_resource) do + nil + end + + @impl true + def repo(resource, _kind) do + AshSqlite.DataLayer.Info.repo(resource) + end + + @impl true + def multicolumn_distinct?, do: false + + @impl true + def parameterized_type(type, constraints, no_maps? \\ false) + + def parameterized_type({:parameterized, _, _} = type, _, _) do + type + end + + def parameterized_type({:in, type}, constraints, no_maps?) do + parameterized_type({:array, type}, constraints, no_maps?) + end + + def parameterized_type({:array, type}, constraints, no_maps?) do + case parameterized_type(type, constraints[:items] || [], no_maps?) do + nil -> + nil + + type -> + {:array, type} + end + end + + def parameterized_type(type, _constraints, _no_maps?) + when type in [Ash.Type.Map, Ash.Type.Map.EctoType], + do: nil + + def parameterized_type(type, constraints, no_maps?) do + if Ash.Type.ash_type?(type) do + cast_in_query? = + if function_exported?(Ash.Type, :cast_in_query?, 2) do + Ash.Type.cast_in_query?(type, constraints) + else + Ash.Type.cast_in_query?(type) + end + + if cast_in_query? do + parameterized_type(Ash.Type.ecto_type(type), constraints, no_maps?) + else + nil + end + else + if is_atom(type) && :erlang.function_exported(type, :type, 1) do + {:parameterized, type, constraints || []} + else + type + end + end + end + + @impl true + def determine_types(mod, values) do + Code.ensure_compiled(mod) + + cond do + :erlang.function_exported(mod, :types, 0) -> + mod.types() + + :erlang.function_exported(mod, :args, 0) -> + mod.args() + + true -> + [:any] + end + |> Enum.map(fn types -> + case types do + :same -> + types = + for _ <- values do + :same + end + + closest_fitting_type(types, values) + + :any -> + for _ <- values do + :any + end + + types -> + closest_fitting_type(types, values) + end + end) + |> Enum.filter(fn types -> + Enum.all?(types, &(vagueness(&1) == 0)) + end) + |> case do + [type] -> + if type == :any || type == {:in, :any} do + nil + else + type + end + + # There are things we could likely do here + # We only say "we know what types these are" when we explicitly know + _ -> + Enum.map(values, fn _ -> nil end) + end + end + + defp closest_fitting_type(types, values) do + types_with_values = Enum.zip(types, values) + + types_with_values + |> fill_in_known_types() + |> clarify_types() + end + + defp clarify_types(types) do + basis = + types + |> Enum.map(&elem(&1, 0)) + |> Enum.min_by(&vagueness(&1)) + + Enum.map(types, fn {type, _value} -> + replace_same(type, basis) + end) + end + + defp replace_same({:in, type}, basis) do + {:in, replace_same(type, basis)} + end + + defp replace_same(:same, :same) do + :any + end + + defp replace_same(:same, {:in, :same}) do + {:in, :any} + end + + defp replace_same(:same, basis) do + basis + end + + defp replace_same(other, _basis) do + other + end + + defp fill_in_known_types(types) do + Enum.map(types, &fill_in_known_type/1) + end + + defp fill_in_known_type( + {vague_type, %Ash.Query.Ref{attribute: %{type: type, constraints: constraints}}} = ref + ) + when vague_type in [:any, :same] do + if Ash.Type.ash_type?(type) do + type = type |> parameterized_type(constraints, true) |> array_to_in() + + {type || :any, ref} + else + type = + if is_atom(type) && :erlang.function_exported(type, :type, 1) do 
+ {:parameterized, type, []} |> array_to_in() + else + type |> array_to_in() + end + + {type, ref} + end + end + + defp fill_in_known_type( + {{:array, type}, %Ash.Query.Ref{attribute: %{type: {:array, type}} = attribute} = ref} + ) do + {:in, fill_in_known_type({type, %{ref | attribute: %{attribute | type: type}}})} + end + + defp fill_in_known_type({type, value}), do: {array_to_in(type), value} + + defp array_to_in({:array, v}), do: {:in, array_to_in(v)} + + defp array_to_in({:parameterized, type, constraints}), + do: {:parameterized, array_to_in(type), constraints} + + defp array_to_in(v), do: v + + defp vagueness({:in, type}), do: vagueness(type) + defp vagueness(:same), do: 2 + defp vagueness(:any), do: 1 + defp vagueness(_), do: 0 + + defp do_get_path( + query, + %Ash.Query.Function.GetPath{arguments: [left, right], embedded?: pred_embedded?}, + bindings, + embedded?, + acc, + type \\ nil + ) do + path = "$." <> Enum.join(right, ".") + + {expr, acc} = + AshSql.Expr.dynamic_expr( + query, + %Ash.Query.Function.Fragment{ + embedded?: pred_embedded?, + arguments: [ + raw: "json_extract(", + expr: left, + raw: ", ", + expr: path, + raw: ")" + ] + }, + bindings, + embedded?, + type, + acc + ) + + if type do + {expr, acc} = + AshSql.Expr.dynamic_expr( + query, + %Ash.Query.Function.Type{arguments: [expr, type, []]}, + bindings, + embedded?, + type, + acc + ) + + {:ok, expr, acc} + else + {:ok, expr, acc} + end + end + + defp determine_type_at_path(type, path) do + path + |> Enum.reject(&is_integer/1) + |> do_determine_type_at_path(type) + |> case do + nil -> + nil + + {type, constraints} -> + AshSqlite.Types.parameterized_type(type, constraints) + end + end + + defp do_determine_type_at_path([], _), do: nil + + defp do_determine_type_at_path([item], type) do + case Ash.Resource.Info.attribute(type, item) do + nil -> + nil + + %{type: {:array, type}, constraints: constraints} -> + constraints = constraints[:items] || [] + + {type, constraints} + + %{type: type, constraints: constraints} -> + {type, constraints} + end + end + + defp do_determine_type_at_path([item | rest], type) do + case Ash.Resource.Info.attribute(type, item) do + nil -> + nil + + %{type: {:array, type}} -> + if Ash.Type.embedded_type?(type) do + type + else + nil + end + + %{type: type} -> + if Ash.Type.embedded_type?(type) do + type + else + nil + end + end + |> case do + nil -> + nil + + type -> + do_determine_type_at_path(rest, type) + end + end +end diff --git a/lib/statement.ex b/lib/statement.ex new file mode 100644 index 0000000..506c963 --- /dev/null +++ b/lib/statement.ex @@ -0,0 +1,45 @@ +defmodule AshSqlite.Statement do + @moduledoc "Represents a custom statement to be run in generated migrations" + + @fields [ + :name, + :up, + :down, + :code? + ] + + defstruct @fields + + def fields, do: @fields + + @schema [ + name: [ + type: :atom, + required: true, + doc: """ + The name of the statement, must be unique within the resource + """ + ], + code?: [ + type: :boolean, + default: false, + doc: """ + By default, we place the strings inside of ecto migration's `execute/1` function and assume they are sql. 
Use this option if you want to provide custom Elixir code to be placed directly in the migrations + """ + ], + up: [ + type: :string, + doc: """ + How to create the structure of the statement + """, + required: true + ], + down: [ + type: :string, + doc: "How to tear down the structure of the statement", + required: true + ] + ] + + def schema, do: @schema +end diff --git a/lib/transformers/ensure_table_or_polymorphic.ex b/lib/transformers/ensure_table_or_polymorphic.ex new file mode 100644 index 0000000..b6a4dd4 --- /dev/null +++ b/lib/transformers/ensure_table_or_polymorphic.ex @@ -0,0 +1,30 @@ +defmodule AshSqlite.Transformers.EnsureTableOrPolymorphic do + @moduledoc false + use Spark.Dsl.Transformer + alias Spark.Dsl.Transformer + + def transform(dsl) do + if Transformer.get_option(dsl, [:sqlite], :polymorphic?) || + Transformer.get_option(dsl, [:sqlite], :table) do + {:ok, dsl} + else + resource = Transformer.get_persisted(dsl, :module) + + raise Spark.Error.DslError, + module: resource, + message: """ + Must configure a table for #{inspect(resource)}. + + For example: + + ```elixir + sqlite do + table "the_table" + repo YourApp.Repo + end + ``` + """, + path: [:sqlite, :table] + end + end +end diff --git a/lib/transformers/validate_references.ex b/lib/transformers/validate_references.ex new file mode 100644 index 0000000..0d63a0b --- /dev/null +++ b/lib/transformers/validate_references.ex @@ -0,0 +1,23 @@ +defmodule AshSqlite.Transformers.ValidateReferences do + @moduledoc false + use Spark.Dsl.Transformer + alias Spark.Dsl.Transformer + + def after_compile?, do: true + + def transform(dsl) do + dsl + |> AshSqlite.DataLayer.Info.references() + |> Enum.each(fn reference -> + unless Ash.Resource.Info.relationship(dsl, reference.relationship) do + raise Spark.Error.DslError, + path: [:sqlite, :references, reference.relationship], + module: Transformer.get_persisted(dsl, :module), + message: + "Found reference configuration for relationship `#{reference.relationship}`, but no such relationship exists" + end + end) + + {:ok, dsl} + end +end diff --git a/lib/transformers/verify_repo.ex b/lib/transformers/verify_repo.ex new file mode 100644 index 0000000..5f49882 --- /dev/null +++ b/lib/transformers/verify_repo.ex @@ -0,0 +1,22 @@ +defmodule AshSqlite.Transformers.VerifyRepo do + @moduledoc false + use Spark.Dsl.Transformer + alias Spark.Dsl.Transformer + + def after_compile?, do: true + + def transform(dsl) do + repo = Transformer.get_option(dsl, [:sqlite], :repo) + + cond do + match?({:error, _}, Code.ensure_compiled(repo)) -> + {:error, "Could not find repo module #{repo}"} + + repo.__adapter__() != Ecto.Adapters.SQLite3 -> + {:error, "Expected a repo using the SQLite adapter `Ecto.Adapters.SQLite3`"} + + true -> + {:ok, dsl} + end + end +end diff --git a/lib/type.ex b/lib/type.ex new file mode 100644 index 0000000..1be8904 --- /dev/null +++ b/lib/type.ex @@ -0,0 +1,19 @@ +defmodule AshSqlite.Type do + @moduledoc """ + SQLite-specific callbacks for `Ash.Type`. + + Use this in addition to `Ash.Type`.
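As a minimal sketch (the module name and its semantics are invented for illustration), a string-backed type that opts into the callback might look like:

```elixir
defmodule MyApp.Types.Slug do
  # a thin wrapper around the builtin :string type, for brevity
  use Ash.Type.NewType, subtype_of: :string
  use AshSqlite.Type

  @impl AshSqlite.Type
  def value_to_sqlite_default(_type, _constraints, value) when is_binary(value) do
    # quote the literal so the migration generator can inline it as a column default
    {:ok, "'" <> String.replace(value, "'", "''") <> "'"}
  end

  def value_to_sqlite_default(_type, _constraints, _value), do: :error
end
```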
+ """ + + @callback value_to_sqlite_default(Ash.Type.t(), Ash.Type.constraints(), term) :: + {:ok, String.t()} | :error + + defmacro __using__(_) do + quote do + @behaviour AshSqlite.Type + def value_to_sqlite_default(_, _, _), do: :error + + defoverridable value_to_sqlite_default: 3 + end + end +end diff --git a/lib/types/types.ex b/lib/types/types.ex new file mode 100644 index 0000000..63c5f3e --- /dev/null +++ b/lib/types/types.ex @@ -0,0 +1,182 @@ +defmodule AshSqlite.Types do + @moduledoc false + + alias Ash.Query.Ref + + def parameterized_type({:parameterized, _, _} = type, _) do + type + end + + def parameterized_type({:in, type}, constraints) do + parameterized_type({:array, type}, constraints) + end + + def parameterized_type({:array, type}, constraints) do + case parameterized_type(type, constraints[:items] || []) do + nil -> + nil + + type -> + {:array, type} + end + end + + def parameterized_type(type, _constraints) when type in [Ash.Type.Map, Ash.Type.Map.EctoType], + do: nil + + def parameterized_type(type, constraints) do + if Ash.Type.ash_type?(type) do + cast_in_query? = + if function_exported?(Ash.Type, :cast_in_query?, 2) do + Ash.Type.cast_in_query?(type, constraints) + else + Ash.Type.cast_in_query?(type) + end + + if cast_in_query? do + parameterized_type(Ash.Type.ecto_type(type), constraints) + else + nil + end + else + if is_atom(type) && :erlang.function_exported(type, :type, 1) do + {:parameterized, type, constraints || []} + else + type + end + end + end + + def determine_types(mod, values) do + Code.ensure_compiled(mod) + + cond do + :erlang.function_exported(mod, :types, 0) -> + mod.types() + + :erlang.function_exported(mod, :args, 0) -> + mod.args() + + true -> + [:any] + end + |> Enum.map(fn types -> + case types do + :same -> + types = + for _ <- values do + :same + end + + closest_fitting_type(types, values) + + :any -> + for _ <- values do + :any + end + + types -> + closest_fitting_type(types, values) + end + end) + |> Enum.filter(fn types -> + Enum.all?(types, &(vagueness(&1) == 0)) + end) + |> case do + [type] -> + if type == :any || type == {:in, :any} do + nil + else + type + end + + # There are things we could likely do here + # We only say "we know what types these are" when we explicitly know + _ -> + Enum.map(values, fn _ -> nil end) + end + end + + defp closest_fitting_type(types, values) do + types_with_values = Enum.zip(types, values) + + types_with_values + |> fill_in_known_types() + |> clarify_types() + end + + defp clarify_types(types) do + basis = + types + |> Enum.map(&elem(&1, 0)) + |> Enum.min_by(&vagueness(&1)) + + Enum.map(types, fn {type, _value} -> + replace_same(type, basis) + end) + end + + defp replace_same({:in, type}, basis) do + {:in, replace_same(type, basis)} + end + + defp replace_same(:same, :same) do + :any + end + + defp replace_same(:same, {:in, :same}) do + {:in, :any} + end + + defp replace_same(:same, basis) do + basis + end + + defp replace_same(other, _basis) do + other + end + + defp fill_in_known_types(types) do + Enum.map(types, &fill_in_known_type/1) + end + + defp fill_in_known_type( + {vague_type, %Ref{attribute: %{type: type, constraints: constraints}}} = ref + ) + when vague_type in [:any, :same] do + if Ash.Type.ash_type?(type) do + type = type |> parameterized_type(constraints) |> array_to_in() + + {type || :any, ref} + else + type = + if is_atom(type) && :erlang.function_exported(type, :type, 1) do + {:parameterized, type, []} |> array_to_in() + else + type |> array_to_in() + end + + {type, ref} + end + 
end + + defp fill_in_known_type( + {{:array, type}, %Ref{attribute: %{type: {:array, type}} = attribute} = ref} + ) do + {:in, fill_in_known_type({type, %{ref | attribute: %{attribute | type: type}}})} + end + + defp fill_in_known_type({type, value}), do: {array_to_in(type), value} + + defp array_to_in({:array, v}), do: {:in, array_to_in(v)} + + defp array_to_in({:parameterized, type, constraints}), + do: {:parameterized, array_to_in(type), constraints} + + defp array_to_in(v), do: v + + defp vagueness({:in, type}), do: vagueness(type) + defp vagueness(:same), do: 2 + defp vagueness(:any), do: 1 + defp vagueness(_), do: 0 +end diff --git a/logos/small-logo.png b/logos/small-logo.png new file mode 100644 index 0000000..9fc9aa1 Binary files /dev/null and b/logos/small-logo.png differ diff --git a/mix.exs b/mix.exs new file mode 100644 index 0000000..fcad7b9 --- /dev/null +++ b/mix.exs @@ -0,0 +1,203 @@ +defmodule AshSqlite.MixProject do + use Mix.Project + + @description """ + The SQLite data layer for Ash Framework. + """ + + @version "0.1.2" + + def project do + [ + app: :ash_sqlite, + version: @version, + elixir: "~> 1.11", + start_permanent: Mix.env() == :prod, + deps: deps(), + description: @description, + elixirc_paths: elixirc_paths(Mix.env()), + preferred_cli_env: [ + coveralls: :test, + "coveralls.github": :test, + "test.create": :test, + "test.migrate": :test, + "test.rollback": :test, + "test.check_migrations": :test, + "test.drop": :test, + "test.generate_migrations": :test, + "test.reset": :test + ], + dialyzer: [ + plt_add_apps: [:ecto, :ash, :mix] + ], + docs: docs(), + aliases: aliases(), + package: package(), + source_url: "https://github.com/ash-project/ash_sqlite", + homepage_url: "https://github.com/ash-project/ash_sqlite", + consolidate_protocols: Mix.env() != :test + ] + end + + if Mix.env() == :test do + def application() do + [ + mod: {AshSqlite.TestApp, []} + ] + end + end + + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + defp package do + [ + name: :ash_sqlite, + licenses: ["MIT"], + files: ~w(lib .formatter.exs mix.exs README* LICENSE* + CHANGELOG* documentation), + links: %{ + GitHub: "https://github.com/ash-project/ash_sqlite" + } + ] + end + + defp docs do + [ + main: "readme", + source_ref: "v#{@version}", + logo: "logos/small-logo.png", + extras: [ + {"README.md", title: "Home"}, + "documentation/tutorials/getting-started-with-ash-sqlite.md", + "documentation/topics/about-ash-sqlite/what-is-ash-sqlite.md", + "documentation/topics/resources/references.md", + "documentation/topics/resources/polymorphic-resources.md", + "documentation/topics/development/migrations-and-tasks.md", + "documentation/topics/development/testing.md", + "documentation/topics/advanced/expressions.md", + "documentation/topics/advanced/manual-relationships.md", + "documentation/dsls/DSL:-AshSqlite.DataLayer.md", + "CHANGELOG.md" + ], + groups_for_extras: [ + Tutorials: [ + ~r'documentation/tutorials' + ], + "How To": ~r'documentation/how_to', + Topics: ~r'documentation/topics', + DSLs: ~r'documentation/dsls', + "About AshSqlite": [ + "CHANGELOG.md" + ] + ], + groups_for_modules: [ + AshSqlite: [ + AshSqlite, + AshSqlite.Repo, + AshSqlite.DataLayer + ], + Utilities: [ + AshSqlite.ManualRelationship + ], + Introspection: [ + AshSqlite.DataLayer.Info, + AshSqlite.CustomExtension, + AshSqlite.CustomIndex, + AshSqlite.Reference, + AshSqlite.Statement + ], + Types: [ + AshSqlite.Type + ], + Expressions: [ + AshSqlite.Functions.Fragment, + 
AshSqlite.Functions.Like + ], + Internals: ~r/.*/ + ] + ] + end + + # Run "mix help deps" to learn about dependencies. + defp deps do + [ + {:ecto_sql, "~> 3.9"}, + {:ecto_sqlite3, "~> 0.15"}, + {:ecto, "~> 3.9"}, + {:jason, "~> 1.0"}, + {:ash, ash_version("~> 3.0")}, + {:ash_sql, ash_sql_version("~> 0.1")}, + {:git_ops, "~> 2.5", only: [:dev, :test]}, + {:ex_doc, "~> 0.22", only: [:dev, :test], runtime: false}, + {:ex_check, "~> 0.14", only: [:dev, :test]}, + {:credo, ">= 0.0.0", only: [:dev, :test], runtime: false}, + {:dialyxir, ">= 0.0.0", only: [:dev, :test], runtime: false}, + {:sobelow, ">= 0.0.0", only: [:dev, :test], runtime: false}, + {:mix_audit, ">= 0.0.0", only: [:dev, :test], runtime: false} + ] + end + + defp ash_version(default_version) do + case System.get_env("ASH_VERSION") do + nil -> + default_version + + "local" -> + [path: "../ash", override: true] + + "main" -> + [git: "https://github.com/ash-project/ash.git"] + + version when is_binary(version) -> + "~> #{version}" + + version -> + version + end + end + + defp ash_sql_version(default_version) do + case System.get_env("ASH_SQL_VERSION") do + nil -> + default_version + + "local" -> + [path: "../ash_sql", override: true] + + "main" -> + [git: "https://github.com/ash-project/ash_sql.git"] + + version when is_binary(version) -> + "~> #{version}" + + version -> + version + end + end + + defp aliases do + [ + sobelow: + "sobelow --skip -i Config.Secrets --ignore-files lib/migration_generator/migration_generator.ex", + credo: "credo --strict", + docs: [ + "spark.cheat_sheets", + "docs", + "spark.replace_doc_links", + "spark.cheat_sheets_in_search" + ], + "spark.formatter": "spark.formatter --extensions AshSqlite.DataLayer", + "spark.cheat_sheets": "spark.cheat_sheets --extensions AshSqlite.DataLayer", + "spark.cheat_sheets_in_search": + "spark.cheat_sheets_in_search --extensions AshSqlite.DataLayer", + "test.generate_migrations": "ash_sqlite.generate_migrations", + "test.check_migrations": "ash_sqlite.generate_migrations --check", + "test.migrate": "ash_sqlite.migrate", + "test.rollback": "ash_sqlite.rollback", + "test.create": "ash_sqlite.create", + "test.reset": ["test.drop", "test.create", "test.migrate"], + "test.drop": "ash_sqlite.drop" + ] + end +end diff --git a/mix.lock b/mix.lock new file mode 100644 index 0000000..5610185 --- /dev/null +++ b/mix.lock @@ -0,0 +1,42 @@ +%{ + "ash": {:hex, :ash, "3.0.0", "2ef88639fce9f126c57c115f955d7e29919942afe74adc00cb7250fd7ced9f5f", [:mix], [{:comparable, "~> 1.0", [hex: :comparable, repo: "hexpm", optional: false]}, {:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ecto, "~> 3.7", [hex: :ecto, repo: "hexpm", optional: false]}, {:ets, "~> 0.8", [hex: :ets, repo: "hexpm", optional: false]}, {:jason, ">= 1.0.0", [hex: :jason, repo: "hexpm", optional: false]}, {:picosat_elixir, "~> 0.2", [hex: :picosat_elixir, repo: "hexpm", optional: true]}, {:plug, ">= 0.0.0", [hex: :plug, repo: "hexpm", optional: true]}, {:reactor, ">= 0.8.1 and < 1.0.0-0", [hex: :reactor, repo: "hexpm", optional: false]}, {:simple_sat, ">= 0.1.1 and < 1.0.0-0", [hex: :simple_sat, repo: "hexpm", optional: true]}, {:spark, ">= 2.1.18 and < 3.0.0-0", [hex: :spark, repo: "hexpm", optional: false]}, {:splode, "~> 0.2", [hex: :splode, repo: "hexpm", optional: false]}, {:stream_data, "~> 0.6", [hex: :stream_data, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.1", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "ffed4651c9faf79e90066afdd52202e3f8951624bf73fd8ad34aa4c22fceef4b"},
"ash_sql": {:hex, :ash_sql, "0.1.1-rc.20", "54f1007c101ddce806a065e94d77890cb80a5c80fa485858334d33ccb753c620", [:mix], [{:ash, "~> 3.0.0-rc", [hex: :ash, repo: "hexpm", optional: false]}, {:ecto, "~> 3.9", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.9", [hex: :ecto_sql, repo: "hexpm", optional: false]}], "hexpm", "26094d2fa92606e882399a16839815d2f9efe6cc06408c295c53525e3372c17c"}, + "bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"}, + "cc_precompiler": {:hex, :cc_precompiler, "0.1.10", "47c9c08d8869cf09b41da36538f62bc1abd3e19e41701c2cea2675b53c704258", [:mix], [{:elixir_make, "~> 0.7", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "f6e046254e53cd6b41c6bacd70ae728011aa82b2742a80d6e2214855c6e06b22"}, + "comparable": {:hex, :comparable, "1.0.0", "bb669e91cedd14ae9937053e5bcbc3c52bb2f22422611f43b6e38367d94a495f", [:mix], [{:typable, "~> 0.1", [hex: :typable, repo: "hexpm", optional: false]}], "hexpm", "277c11eeb1cd726e7cd41c6c199e7e52fa16ee6830b45ad4cdc62e51f62eb60c"}, + "credo": {:hex, :credo, "1.7.6", "b8f14011a5443f2839b04def0b252300842ce7388f3af177157c86da18dfbeea", [:mix], [{:bunt, "~> 0.2.1 or ~> 1.0", [hex: :bunt, repo: "hexpm", optional: false]}, {:file_system, "~> 0.2 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "146f347fb9f8cbc5f7e39e3f22f70acbef51d441baa6d10169dd604bfbc55296"}, + "db_connection": {:hex, :db_connection, "2.6.0", "77d835c472b5b67fc4f29556dee74bf511bbafecdcaf98c27d27fa5918152086", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c2f992d15725e721ec7fbc1189d4ecdb8afef76648c746a8e1cad35e3b8a35f3"}, + "decimal": {:hex, :decimal, "2.1.1", "5611dca5d4b2c3dd497dec8f68751f1f1a54755e8ed2a966c2633cf885973ad6", [:mix], [], "hexpm", "53cfe5f497ed0e7771ae1a475575603d77425099ba5faef9394932b35020ffcc"}, + "dialyxir": {:hex, :dialyxir, "1.4.3", "edd0124f358f0b9e95bfe53a9fcf806d615d8f838e2202a9f430d59566b6b53b", [:mix], [{:erlex, ">= 0.2.6", [hex: :erlex, repo: "hexpm", optional: false]}], "hexpm", "bf2cfb75cd5c5006bec30141b131663299c661a864ec7fbbc72dfa557487a986"}, + "earmark_parser": {:hex, :earmark_parser, "1.4.39", "424642f8335b05bb9eb611aa1564c148a8ee35c9c8a8bba6e129d51a3e3c6769", [:mix], [], "hexpm", "06553a88d1f1846da9ef066b87b57c6f605552cfbe40d20bd8d59cc6bde41944"}, + "ecto": {:hex, :ecto, "3.11.2", "e1d26be989db350a633667c5cda9c3d115ae779b66da567c68c80cfb26a8c9ee", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "3c38bca2c6f8d8023f2145326cc8a80100c3ffe4dcbd9842ff867f7fc6156c65"}, + "ecto_sql": {:hex, :ecto_sql, "3.11.1", "e9abf28ae27ef3916b43545f9578b4750956ccea444853606472089e7d169470", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.11.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.6.0", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.16.0 or ~> 0.17.0 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", 
optional: false]}], "hexpm", "ce14063ab3514424276e7e360108ad6c2308f6d88164a076aac8a387e1fea634"}, + "ecto_sqlite3": {:hex, :ecto_sqlite3, "0.15.1", "40f2fbd9e246455f8c42e7e0a77009ef806caa1b3ce6f717b2a0a80e8432fcfd", [:mix], [{:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:ecto, "~> 3.11", [hex: :ecto, repo: "hexpm", optional: false]}, {:ecto_sql, "~> 3.11", [hex: :ecto_sql, repo: "hexpm", optional: false]}, {:exqlite, "~> 0.19", [hex: :exqlite, repo: "hexpm", optional: false]}], "hexpm", "28b16e177123c688948357176662bf9ff9084daddf950ef5b6baf3ee93707064"}, + "elixir_make": {:hex, :elixir_make, "0.8.3", "d38d7ee1578d722d89b4d452a3e36bcfdc644c618f0d063b874661876e708683", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:certifi, "~> 2.0", [hex: :certifi, repo: "hexpm", optional: true]}], "hexpm", "5c99a18571a756d4af7a4d89ca75c28ac899e6103af6f223982f09ce44942cc9"}, + "erlex": {:hex, :erlex, "0.2.6", "c7987d15e899c7a2f34f5420d2a2ea0d659682c06ac607572df55a43753aa12e", [:mix], [], "hexpm", "2ed2e25711feb44d52b17d2780eabf998452f6efda104877a3881c2f8c0c0c75"}, + "ets": {:hex, :ets, "0.9.0", "79c6a6c205436780486f72d84230c6cba2f8a9920456750ddd1e47389107d5fd", [:mix], [], "hexpm", "2861fdfb04bcaeff370f1a5904eec864f0a56dcfebe5921ea9aadf2a481c822b"}, + "ex_check": {:hex, :ex_check, "0.16.0", "07615bef493c5b8d12d5119de3914274277299c6483989e52b0f6b8358a26b5f", [:mix], [], "hexpm", "4d809b72a18d405514dda4809257d8e665ae7cf37a7aee3be6b74a34dec310f5"}, + "ex_doc": {:hex, :ex_doc, "0.32.1", "21e40f939515373bcdc9cffe65f3b3543f05015ac6c3d01d991874129d173420", [:mix], [{:earmark_parser, "~> 1.4.39", [hex: :earmark_parser, repo: "hexpm", optional: false]}, {:makeup_c, ">= 0.1.1", [hex: :makeup_c, repo: "hexpm", optional: true]}, {:makeup_elixir, "~> 0.14", [hex: :makeup_elixir, repo: "hexpm", optional: false]}, {:makeup_erlang, "~> 0.1", [hex: :makeup_erlang, repo: "hexpm", optional: false]}], "hexpm", "5142c9db521f106d61ff33250f779807ed2a88620e472ac95dc7d59c380113da"}, + "exqlite": {:hex, :exqlite, "0.21.0", "8d06c60b3d6df42bb4cdeb4dce4bc804788e227cead7dc190c3ffaba50bffbb4", [:make, :mix], [{:cc_precompiler, "~> 0.1", [hex: :cc_precompiler, repo: "hexpm", optional: false]}, {:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:elixir_make, "~> 0.8", [hex: :elixir_make, repo: "hexpm", optional: false]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "b177180bb2788b761ddd5949763640aef92ed06db80d70a1130b6bede180b45f"}, + "file_system": {:hex, :file_system, "1.0.0", "b689cc7dcee665f774de94b5a832e578bd7963c8e637ef940cd44327db7de2cd", [:mix], [], "hexpm", "6752092d66aec5a10e662aefeed8ddb9531d79db0bc145bb8c40325ca1d8536d"}, + "git_cli": {:hex, :git_cli, "0.3.0", "a5422f9b95c99483385b976f5d43f7e8233283a47cda13533d7c16131cb14df5", [:mix], [], "hexpm", "78cb952f4c86a41f4d3511f1d3ecb28edb268e3a7df278de2faa1bd4672eaf9b"}, + "git_ops": {:hex, :git_ops, "2.6.1", "cc7799a68c26cf814d6d1a5121415b4f5bf813de200908f930b27a2f1fe9dad5", [:mix], [{:git_cli, "~> 0.2", [hex: :git_cli, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.0", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "ce62d07e41fe993ec22c35d5edb11cf333a21ddaead6f5d9868fcb607d42039e"}, + "jason": {:hex, :jason, "1.4.1", "af1504e35f629ddcdd6addb3513c3853991f694921b1b9368b0bd32beb9f1b63", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", 
"fbb01ecdfd565b56261302f7e1fcc27c4fb8f32d56eab74db621fc154604a7a1"}, + "libgraph": {:hex, :libgraph, "0.16.0", "3936f3eca6ef826e08880230f806bfea13193e49bf153f93edcf0239d4fd1d07", [:mix], [], "hexpm", "41ca92240e8a4138c30a7e06466acc709b0cbb795c643e9e17174a178982d6bf"}, + "makeup": {:hex, :makeup, "1.1.1", "fa0bc768698053b2b3869fa8a62616501ff9d11a562f3ce39580d60860c3a55e", [:mix], [{:nimble_parsec, "~> 1.2.2 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "5dc62fbdd0de44de194898b6710692490be74baa02d9d108bc29f007783b0b48"}, + "makeup_elixir": {:hex, :makeup_elixir, "0.16.2", "627e84b8e8bf22e60a2579dad15067c755531fea049ae26ef1020cad58fe9578", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}, {:nimble_parsec, "~> 1.2.3 or ~> 1.3", [hex: :nimble_parsec, repo: "hexpm", optional: false]}], "hexpm", "41193978704763f6bbe6cc2758b84909e62984c7752b3784bd3c218bb341706b"}, + "makeup_erlang": {:hex, :makeup_erlang, "0.1.5", "e0ff5a7c708dda34311f7522a8758e23bfcd7d8d8068dc312b5eb41c6fd76eba", [:mix], [{:makeup, "~> 1.0", [hex: :makeup, repo: "hexpm", optional: false]}], "hexpm", "94d2e986428585a21516d7d7149781480013c56e30c6a233534bedf38867a59a"}, + "mix_audit": {:hex, :mix_audit, "2.1.3", "c70983d5cab5dca923f9a6efe559abfb4ec3f8e87762f02bab00fa4106d17eda", [:make, :mix], [{:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: false]}, {:yaml_elixir, "~> 2.9", [hex: :yaml_elixir, repo: "hexpm", optional: false]}], "hexpm", "8c3987100b23099aea2f2df0af4d296701efd031affb08d0746b2be9e35988ec"}, + "myxql": {:hex, :myxql, "0.6.4", "1502ea37ee23c31b79725b95d4cc3553693c2bda7421b1febc50722fd988c918", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.6 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:geo, "~> 3.4", [hex: :geo, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "a3307f4671f3009d3708283649adf205bfe280f7e036fc8ef7f16dbf821ab8e9"}, + "nimble_parsec": {:hex, :nimble_parsec, "1.4.0", "51f9b613ea62cfa97b25ccc2c1b4216e81df970acd8e16e8d1bdc58fef21370d", [:mix], [], "hexpm", "9c565862810fb383e9838c1dd2d7d2c437b3d13b267414ba6af33e50d2d1cf28"}, + "reactor": {:hex, :reactor, "0.8.2", "b2be82b1c3402537d06a8f85bb1849f72cb6b4be140495cb8956de7aec2fdebd", [:mix], [{:libgraph, "~> 0.16", [hex: :libgraph, repo: "hexpm", optional: false]}, {:spark, "~> 2.0", [hex: :spark, repo: "hexpm", optional: false]}, {:splode, "~> 0.2", [hex: :splode, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.2", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "c35eb23b77cc77ba922af108722ac93257899e35cfdd18882f0e659ad2cac9f3"}, + "sobelow": {:hex, :sobelow, "0.13.0", "218afe9075904793f5c64b8837cc356e493d88fddde126a463839351870b8d1e", [:mix], [{:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "cd6e9026b85fc35d7529da14f95e85a078d9dd1907a9097b3ba6ac7ebbe34a0d"}, + "sourceror": {:hex, :sourceror, "1.1.0", "9c129fa1bd7290014acf6f73e292f43938c17e3fccd7b7df6f41122cab45dda9", [:mix], [], "hexpm", "b9c348688e2cfc20acfef0feaca88643044be5acd2e0b02cf4a8d6ac1edc4c4a"}, + "spark": {:hex, :spark, "2.1.21", "0c2e5c24bc99f65ee874a563f9f3ba6e5c7c8a79b7de4b3b65af770ca6c8120e", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:sourceror, "~> 1.0", [hex: :sourceror, repo: "hexpm", optional: false]}], "hexpm", 
"e8a32fd3138524096553d908f37ddb23f0c7cb731d75018d5f2ad6cb583714ec"}, + "splode": {:hex, :splode, "0.2.4", "71046334c39605095ca4bed5d008372e56454060997da14f9868534c17b84b53", [:mix], [], "hexpm", "ca3b95f0d8d4b482b5357954fec857abd0fa3ea509d623334c1328e7382044c2"}, + "stream_data": {:hex, :stream_data, "0.6.0", "e87a9a79d7ec23d10ff83eb025141ef4915eeb09d4491f79e52f2562b73e5f47", [:mix], [], "hexpm", "b92b5031b650ca480ced047578f1d57ea6dd563f5b57464ad274718c9c29501c"}, + "telemetry": {:hex, :telemetry, "1.2.1", "68fdfe8d8f05a8428483a97d7aab2f268aaff24b49e0f599faa091f1d4e7f61c", [:rebar3], [], "hexpm", "dad9ce9d8effc621708f99eac538ef1cbe05d6a874dd741de2e689c47feafed5"}, + "typable": {:hex, :typable, "0.3.0", "0431e121d124cd26f312123e313d2689b9a5322b15add65d424c07779eaa3ca1", [:mix], [], "hexpm", "880a0797752da1a4c508ac48f94711e04c86156f498065a83d160eef945858f8"}, + "yamerl": {:hex, :yamerl, "0.10.0", "4ff81fee2f1f6a46f1700c0d880b24d193ddb74bd14ef42cb0bcf46e81ef2f8e", [:rebar3], [], "hexpm", "346adb2963f1051dc837a2364e4acf6eb7d80097c0f53cbdc3046ec8ec4b4e6e"}, + "yaml_elixir": {:hex, :yaml_elixir, "2.9.0", "9a256da867b37b8d2c1ffd5d9de373a4fda77a32a45b452f1708508ba7bbcb53", [:mix], [{:yamerl, "~> 0.10", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm", "0cb0e7d4c56f5e99a6253ed1a670ed0e39c13fc45a6da054033928607ac08dfc"}, +} diff --git a/priv/resource_snapshots/test_repo/accounts/20240405234211.json b/priv/resource_snapshots/test_repo/accounts/20240405234211.json new file mode 100644 index 0000000..7b9ca04 --- /dev/null +++ b/priv/resource_snapshots/test_repo/accounts/20240405234211.json @@ -0,0 +1,62 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "boolean", + "source": "is_active", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "user_id", + "references": { + "name": "accounts_user_id_fkey", + "table": "users", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "accounts", + "hash": "2320B8B55C597C2F07DED9B7BF714832FE22B0AA5E05959A4EA0553669BC368D", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/authors/20240405234211.json b/priv/resource_snapshots/test_repo/authors/20240405234211.json new file mode 100644 index 0000000..8e367c9 --- /dev/null +++ b/priv/resource_snapshots/test_repo/authors/20240405234211.json @@ -0,0 +1,70 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "first_name", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": 
false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "last_name", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "map", + "source": "bio", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": [ + "array", + "text" + ], + "source": "badges", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "authors", + "hash": "EFBB1E574CC263E6E650121801C48B4370F1C9A7C8A213BEF111BFC769BF6651", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/comment_ratings/20240405234211.json b/priv/resource_snapshots/test_repo/comment_ratings/20240405234211.json new file mode 100644 index 0000000..2d4158d --- /dev/null +++ b/priv/resource_snapshots/test_repo/comment_ratings/20240405234211.json @@ -0,0 +1,62 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "bigint", + "source": "score", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "resource_id", + "references": { + "name": "comment_ratings_resource_id_fkey", + "table": "comments", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": "nil", + "destination_attribute_generated": false + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "comment_ratings", + "hash": "88FFC6DC62CEA37397A9C16C51E43F6FF6EED6C34E4C529FFB4D20EF1BCFF98F", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/comments/20240405234211.json b/priv/resource_snapshots/test_repo/comments/20240405234211.json new file mode 100644 index 0000000..a8d033e --- /dev/null +++ b/priv/resource_snapshots/test_repo/comments/20240405234211.json @@ -0,0 +1,117 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "title", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "bigint", + "source": "likes", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "utc_datetime_usec", + "source": "arbitrary_timestamp", + "references": null, + "allow_nil?": true, + "generated?": false, + 
"primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "utc_datetime_usec", + "source": "created_at", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "post_id", + "references": { + "name": "special_name_fkey", + "table": "posts", + "on_delete": "delete", + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": "update", + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "author_id", + "references": { + "name": "comments_author_id_fkey", + "table": "authors", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "comments", + "hash": "4F081363C965C68A8E3CC755BCA058C9DC0FB18F5BE5B44FEBEB41B787727702", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/integer_posts/20240405234211.json b/priv/resource_snapshots/test_repo/integer_posts/20240405234211.json new file mode 100644 index 0000000..0b19d76 --- /dev/null +++ b/priv/resource_snapshots/test_repo/integer_posts/20240405234211.json @@ -0,0 +1,37 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "bigint", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": true, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "title", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "integer_posts", + "hash": "A3F61182D99B092A9D17E34B645823D8B0561B467B0195EFE0DA42947153D7E0", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/managers/20240405234211.json b/priv/resource_snapshots/test_repo/managers/20240405234211.json new file mode 100644 index 0000000..945168f --- /dev/null +++ b/priv/resource_snapshots/test_repo/managers/20240405234211.json @@ -0,0 +1,101 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "name", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "code", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": false + }, + { + "default": 
"nil", + "size": null, + "type": "text", + "source": "must_be_present", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "role", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "organization_id", + "references": { + "name": "managers_organization_id_fkey", + "table": "orgs", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "managers", + "hash": "1A4EFC8497F6A73543858892D6324407A7060AC2585EDCA9A759D1E8AF509DEF", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [ + { + "name": "uniq_code", + "keys": [ + "code" + ], + "base_filter": null, + "index_name": "managers_uniq_code_index" + } + ], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/orgs/20240405234211.json b/priv/resource_snapshots/test_repo/orgs/20240405234211.json new file mode 100644 index 0000000..daee888 --- /dev/null +++ b/priv/resource_snapshots/test_repo/orgs/20240405234211.json @@ -0,0 +1,37 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "name", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "orgs", + "hash": "106CE7B860A710A1275B05F81F2272B74678DC467F87E4179F9BEA8BC979613C", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/post_links/20240405234211.json b/priv/resource_snapshots/test_repo/post_links/20240405234211.json new file mode 100644 index 0000000..bf22dc7 --- /dev/null +++ b/priv/resource_snapshots/test_repo/post_links/20240405234211.json @@ -0,0 +1,87 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "text", + "source": "state", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "source_post_id", + "references": { + "name": "post_links_source_post_id_fkey", + "table": "posts", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "destination_post_id", + "references": { + "name": 
"post_links_destination_post_id_fkey", + "table": "posts", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + } + ], + "table": "post_links", + "hash": "6ADC017A784C2619574DE223A15A29ECAF6D67C0543DF67A8E4E215E8F8ED300", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [ + { + "name": "unique_link", + "keys": [ + "source_post_id", + "destination_post_id" + ], + "base_filter": null, + "index_name": "post_links_unique_link_index" + } + ], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/post_ratings/20240405234211.json b/priv/resource_snapshots/test_repo/post_ratings/20240405234211.json new file mode 100644 index 0000000..8152559 --- /dev/null +++ b/priv/resource_snapshots/test_repo/post_ratings/20240405234211.json @@ -0,0 +1,62 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "bigint", + "source": "score", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "resource_id", + "references": { + "name": "post_ratings_resource_id_fkey", + "table": "posts", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": "nil", + "destination_attribute_generated": false + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "post_ratings", + "hash": "73A4E0A79F5A6449FFE48E2469FDC275723EF207780DA9027F3BBE3119DC0FFA", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/post_views/20240405234211.json b/priv/resource_snapshots/test_repo/post_views/20240405234211.json new file mode 100644 index 0000000..c11833f --- /dev/null +++ b/priv/resource_snapshots/test_repo/post_views/20240405234211.json @@ -0,0 +1,47 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "utc_datetime_usec", + "source": "time", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "browser", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "post_id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": false + } + ], + "table": "post_views", + "hash": "D0749D9F514E36781D95F2967C97860C58C6DEAE95543DFAAB0E9C09A1480E93", + "repo": 
"Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/posts/20240405234211.json b/priv/resource_snapshots/test_repo/posts/20240405234211.json new file mode 100644 index 0000000..3eaa55d --- /dev/null +++ b/priv/resource_snapshots/test_repo/posts/20240405234211.json @@ -0,0 +1,261 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "title", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "bigint", + "source": "score", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "boolean", + "source": "public", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "citext", + "source": "category", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "type", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "bigint", + "source": "price", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "decimal", + "source": "decimal", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "status", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "status", + "source": "status_enum", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "map", + "source": "stuff", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "uniq_one", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "uniq_two", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "uniq_custom_one", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "uniq_custom_two", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "utc_datetime_usec", + "source": "created_at", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "utc_datetime_usec", + "source": "updated_at", + "references": null, + 
"allow_nil?": false, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "organization_id", + "references": { + "name": "posts_organization_id_fkey", + "table": "orgs", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "author_id", + "references": { + "name": "posts_author_id_fkey", + "table": "authors", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "posts", + "hash": "00D35B64138747A522AD4EAB9BB8E09BDFE30C95844FD1D46E0951E85EA18FBE", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [ + { + "name": "uniq_one_and_two", + "keys": [ + "uniq_one", + "uniq_two" + ], + "base_filter": "type = 'sponsored'", + "index_name": "posts_uniq_one_and_two_index" + } + ], + "base_filter": "type = 'sponsored'", + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [ + { + "message": "dude what the heck", + "name": null, + "table": null, + "include": null, + "fields": [ + "uniq_custom_one", + "uniq_custom_two" + ], + "where": null, + "unique": true, + "using": null + } + ], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/profile/20240405234211.json b/priv/resource_snapshots/test_repo/profile/20240405234211.json new file mode 100644 index 0000000..5a32e97 --- /dev/null +++ b/priv/resource_snapshots/test_repo/profile/20240405234211.json @@ -0,0 +1,62 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "text", + "source": "description", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "author_id", + "references": { + "name": "profile_author_id_fkey", + "table": "authors", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "profile", + "hash": "710F812AC63D2051F6AB22912CE5304088AF1D8F03C2BAFDC07EB24FA62136C2", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/resource_snapshots/test_repo/users/20240405234211.json 
b/priv/resource_snapshots/test_repo/users/20240405234211.json new file mode 100644 index 0000000..27c8593 --- /dev/null +++ b/priv/resource_snapshots/test_repo/users/20240405234211.json @@ -0,0 +1,62 @@ +{ + "attributes": [ + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "id", + "references": null, + "allow_nil?": false, + "generated?": false, + "primary_key?": true + }, + { + "default": "nil", + "size": null, + "type": "boolean", + "source": "is_active", + "references": null, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + }, + { + "default": "nil", + "size": null, + "type": "uuid", + "source": "organization_id", + "references": { + "name": "users_organization_id_fkey", + "table": "orgs", + "on_delete": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "primary_key?": true, + "destination_attribute": "id", + "on_update": null, + "deferrable": false, + "destination_attribute_default": null, + "destination_attribute_generated": null + }, + "allow_nil?": true, + "generated?": false, + "primary_key?": false + } + ], + "table": "users", + "hash": "F1D2233C0B448A17B31E8971DEF529020894252BBF5BAFD58D7280FA36249071", + "repo": "Elixir.AshSqlite.TestRepo", + "identities": [], + "base_filter": null, + "multitenancy": { + "global": null, + "strategy": null, + "attribute": null + }, + "custom_indexes": [], + "custom_statements": [], + "has_create_action": true +} \ No newline at end of file diff --git a/priv/test_repo/migrations/20240405234211_migrate_resources1.exs b/priv/test_repo/migrations/20240405234211_migrate_resources1.exs new file mode 100644 index 0000000..3c75ddd --- /dev/null +++ b/priv/test_repo/migrations/20240405234211_migrate_resources1.exs @@ -0,0 +1,231 @@ +defmodule AshSqlite.TestRepo.Migrations.MigrateResources1 do + @moduledoc """ + Updates resources based on their most recent snapshots. 
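+
+ Each resource's schema is captured as a JSON snapshot under
+ `priv/resource_snapshots/test_repo/` (added earlier in this diff); the
+ generator diffs the newest snapshots against the previous ones to build
+ the `up/0` and `down/0` below. A typical regeneration uses the task
+ named in the note below:
+
+     mix ash_sqlite.generate_migrations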
+ + This file was autogenerated with `mix ash_sqlite.generate_migrations` + """ + + use Ecto.Migration + + def up do + create table(:orgs, primary_key: false) do + add :name, :text + add :id, :uuid, null: false, primary_key: true + end + + create table(:authors, primary_key: false) do + #add :badges, {:array, :text} + add :bio, :map + add :last_name, :text + add :first_name, :text + add :id, :uuid, null: false, primary_key: true + end + + create table(:users, primary_key: false) do + add :organization_id, + references(:orgs, column: :id, name: "users_organization_id_fkey", type: :uuid) + + add :is_active, :boolean + add :id, :uuid, null: false, primary_key: true + end + + create table(:profile, primary_key: false) do + add :author_id, + references(:authors, column: :id, name: "profile_author_id_fkey", type: :uuid) + + add :description, :text + add :id, :uuid, null: false, primary_key: true + end + + create table(:posts, primary_key: false) do + add :author_id, references(:authors, column: :id, name: "posts_author_id_fkey", type: :uuid) + + add :organization_id, + references(:orgs, column: :id, name: "posts_organization_id_fkey", type: :uuid) + + add :updated_at, :utc_datetime_usec, null: false + add :created_at, :utc_datetime_usec, null: false + add :uniq_custom_two, :text + add :uniq_custom_one, :text + add :uniq_two, :text + add :uniq_one, :text + add :stuff, :map + add :status_enum, :"ENUM('open', 'closed')" + add :status, :text + add :decimal, :decimal + add :price, :bigint + add :type, :text + #add :category, :citext + add :category, :text + add :public, :boolean + add :score, :bigint + add :title, :text + add :id, :uuid, null: false, primary_key: true + end + + create table(:post_views, primary_key: false) do + add :post_id, :uuid, null: false + add :browser, :text + add :time, :utc_datetime_usec, null: false + end + + create table(:post_ratings, primary_key: false) do + add :resource_id, + references(:posts, column: :id, name: "post_ratings_resource_id_fkey", type: :uuid) + + add :score, :bigint + add :id, :uuid, null: false, primary_key: true + end + + create table(:post_links, primary_key: false) do + add :destination_post_id, + references(:posts, + column: :id, + name: "post_links_destination_post_id_fkey", + type: :uuid + ), + primary_key: true, + null: false + + add :source_post_id, + references(:posts, column: :id, name: "post_links_source_post_id_fkey", type: :uuid), + primary_key: true, + null: false + + add :state, :text + end + + create unique_index(:post_links, [:source_post_id, :destination_post_id], + name: "post_links_unique_link_index" + ) + + create table(:managers, primary_key: false) do + add :organization_id, + references(:orgs, column: :id, name: "managers_organization_id_fkey", type: :uuid) + + add :role, :text + add :must_be_present, :text, null: false + add :code, :text, null: false + add :name, :text + add :id, :uuid, null: false, primary_key: true + end + + create unique_index(:managers, ["code(768)"], name: "managers_uniq_code_index") + + create table(:integer_posts, primary_key: false) do + add :title, :text + add :id, :bigserial, null: false, primary_key: true + end + + create table(:comments, primary_key: false) do + add :author_id, + references(:authors, column: :id, name: "comments_author_id_fkey", type: :uuid) + + add :post_id, + references(:posts, + column: :id, + name: "special_name_fkey", + type: :uuid, + on_delete: :delete_all, + on_update: :update_all + ) + + add :created_at, :utc_datetime_usec, null: false + add :arbitrary_timestamp, 
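+ # NOTE: the comments snapshot above records the post_id reference as
+ # "on_delete": "delete" / "on_update": "update"; the generator appears
+ # to expand those to Ecto's :delete_all / :update_all options on
+ # special_name_fkey (an observation from this diff, not a documented
+ # guarantee).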
:utc_datetime_usec + add :likes, :bigint + add :title, :text + add :id, :uuid, null: false, primary_key: true + end + + create table(:comment_ratings, primary_key: false) do + add :resource_id, + references(:comments, + column: :id, + name: "comment_ratings_resource_id_fkey", + type: :uuid + ) + + add :score, :bigint + add :id, :uuid, null: false, primary_key: true + end + + create index(:posts, ["uniq_custom_one(384)", "uniq_custom_two(384)"], unique: true) + + create unique_index(:posts, ["uniq_one(384)", "uniq_two(384)"], + # where: "type = 'sponsored'", + name: "posts_uniq_one_and_two_index" + ) + + create table(:accounts, primary_key: false) do + add :user_id, references(:users, column: :id, name: "accounts_user_id_fkey", type: :uuid) + add :is_active, :boolean + add :id, :uuid, null: false, primary_key: true + end + end + + def down do + drop constraint(:accounts, "accounts_user_id_fkey") + + drop table(:accounts) + + drop_if_exists unique_index(:posts, [:uniq_one, :uniq_two], + name: "posts_uniq_one_and_two_index" + ) + + drop_if_exists index(:posts, ["uniq_custom_one", "uniq_custom_two"], + name: "posts_uniq_custom_one_uniq_custom_two_index" + ) + + drop table(:authors) + + drop constraint(:comment_ratings, "comment_ratings_resource_id_fkey") + + drop table(:comment_ratings) + + drop constraint(:comments, "special_name_fkey") + + drop constraint(:comments, "comments_author_id_fkey") + + drop table(:comments) + + drop table(:integer_posts) + + drop_if_exists unique_index(:managers, [:code], name: "managers_uniq_code_index") + + drop constraint(:managers, "managers_organization_id_fkey") + + drop table(:managers) + + drop table(:orgs) + + drop_if_exists unique_index(:post_links, [:source_post_id, :destination_post_id], + name: "post_links_unique_link_index" + ) + + drop constraint(:post_links, "post_links_source_post_id_fkey") + + drop constraint(:post_links, "post_links_destination_post_id_fkey") + + drop table(:post_links) + + drop constraint(:post_ratings, "post_ratings_resource_id_fkey") + + drop table(:post_ratings) + + drop table(:post_views) + + drop constraint(:posts, "posts_organization_id_fkey") + + drop constraint(:posts, "posts_author_id_fkey") + + drop table(:posts) + + drop constraint(:profile, "profile_author_id_fkey") + + drop table(:profile) + + drop constraint(:users, "users_organization_id_fkey") + + drop table(:users) + end +end diff --git a/test/atomics_test.exs b/test/atomics_test.exs new file mode 100644 index 0000000..ae1ad57 --- /dev/null +++ b/test/atomics_test.exs @@ -0,0 +1,75 @@ +defmodule AshSqlite.AtomicsTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + import Ash.Expr + + test "atomics work on upserts" do + id = Ash.UUID.generate() + + Post + |> Ash.Changeset.for_create(:create, %{id: id, title: "foo", price: 1}, upsert?: true) + |> Ash.Changeset.atomic_update(:price, expr(price + 1)) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{id: id, title: "foo", price: 1}, upsert?: true) + |> Ash.Changeset.atomic_update(:price, expr(price + 1)) + |> Ash.create!() + + assert [%{price: 2}] = Post |> Ash.read!() + end + + test "a basic atomic works" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Ash.create!() + + assert %{price: 2} = + post + |> Ash.Changeset.for_update(:update, %{}) + |> Ash.Changeset.atomic_update(:price, expr(price + 1)) + |> Ash.update!() + end + + test "an atomic that violates a constraint will return the proper error" do + post = + Post + |> 
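+ # setup: create a post, then atomically point organization_id at a
+ # fresh UUID with no matching orgs row; the FK violation is expected to
+ # surface below as Ash.Error.Invalid matching ~r/does not exist/.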
Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Ash.create!() + + assert_raise Ash.Error.Invalid, ~r/does not exist/, fn -> + post + |> Ash.Changeset.for_update(:update, %{}) + |> Ash.Changeset.atomic_update(:organization_id, Ash.UUID.generate()) + |> Ash.update!() + end + end + + test "an atomic can refer to a calculation" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Ash.create!() + + post = + post + |> Ash.Changeset.for_update(:update, %{}) + |> Ash.Changeset.atomic_update(:score, expr(score_after_winning)) + |> Ash.update!() + + assert post.score == 1 + end + + test "an atomic can be attached to an action" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "foo", price: 1}) + |> Ash.create!() + + assert Post.increment_score!(post, 2).score == 2 + + assert Post.increment_score!(post, 2).score == 4 + end +end diff --git a/test/bulk_create_test.exs b/test/bulk_create_test.exs new file mode 100644 index 0000000..4b6cda6 --- /dev/null +++ b/test/bulk_create_test.exs @@ -0,0 +1,143 @@ +defmodule AshSqlite.BulkCreateTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + describe "bulk creates" do + test "bulk creates insert each input" do + Ash.bulk_create!([%{title: "fred"}, %{title: "george"}], Post, :create) + + assert [%{title: "fred"}, %{title: "george"}] = + Post + |> Ash.Query.sort(:title) + |> Ash.read!() + end + + test "bulk creates can be streamed" do + assert [{:ok, %{title: "fred"}}, {:ok, %{title: "george"}}] = + Ash.bulk_create!([%{title: "fred"}, %{title: "george"}], Post, :create, + return_stream?: true, + return_records?: true + ) + |> Enum.sort_by(fn {:ok, result} -> result.title end) + end + + test "bulk creates can upsert" do + assert [ + {:ok, %{title: "fred", uniq_one: "one", uniq_two: "two", price: 10}}, + {:ok, %{title: "george", uniq_one: "three", uniq_two: "four", price: 20}} + ] = + Ash.bulk_create!( + [ + %{title: "fred", uniq_one: "one", uniq_two: "two", price: 10}, + %{title: "george", uniq_one: "three", uniq_two: "four", price: 20} + ], + Post, + :create, + return_stream?: true, + return_records?: true + ) + |> Enum.sort_by(fn {:ok, result} -> result.title end) + + assert [ + {:ok, %{title: "fred", uniq_one: "one", uniq_two: "two", price: 1000}}, + {:ok, %{title: "george", uniq_one: "three", uniq_two: "four", price: 20_000}} + ] = + Ash.bulk_create!( + [ + %{title: "something", uniq_one: "one", uniq_two: "two", price: 1000}, + %{title: "else", uniq_one: "three", uniq_two: "four", price: 20_000} + ], + Post, + :create, + upsert?: true, + upsert_identity: :uniq_one_and_two, + upsert_fields: [:price], + return_stream?: true, + return_records?: true + ) + |> Enum.sort_by(fn + {:ok, result} -> + result.title + + _ -> + nil + end) + end + + test "bulk creates can create relationships" do + Ash.bulk_create!( + [%{title: "fred", rating: %{score: 5}}, %{title: "george", rating: %{score: 0}}], + Post, + :create + ) + + assert [ + %{title: "fred", ratings: [%{score: 5}]}, + %{title: "george", ratings: [%{score: 0}]} + ] = + Post + |> Ash.Query.sort(:title) + |> Ash.Query.load(:ratings) + |> Ash.read!() + end + end + + describe "validation errors" do + test "skips invalid by default" do + assert %{records: [_], errors: [_]} = + Ash.bulk_create!([%{title: "fred"}, %{title: "not allowed"}], Post, :create, + return_records?: true, + return_errors?: true + ) + end + + test "returns errors in the stream" do + assert [{:ok, _}, {:error, _}] = + Ash.bulk_create!([%{title: "fred"}, 
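+ # "not allowed" is presumably rejected by a title validation on the
+ # Post test resource (compare "skips invalid by default" above); with
+ # return_stream?: true it arrives as an {:error, _} element instead of
+ # raising.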
%{title: "not allowed"}], Post, :create, + return_records?: true, + return_stream?: true, + return_errors?: true + ) + |> Enum.to_list() + end + end + + describe "database errors" do + test "database errors affect the entire batch" do + org = + AshSqlite.Test.Organization + |> Ash.Changeset.for_create(:create, %{name: "foo"}) + |> Ash.create!() + + Ash.bulk_create( + [ + %{title: "fred", organization_id: org.id}, + %{title: "george", organization_id: Ash.UUID.generate()} + ], + Post, + :create, + return_records?: true + ) + + assert [] = + Post + |> Ash.Query.sort(:title) + |> Ash.read!() + end + + test "database errors don't affect other batches" do + Ash.bulk_create( + [%{title: "george", organization_id: Ash.UUID.generate()}, %{title: "fred"}], + Post, + :create, + return_records?: true, + batch_size: 1 + ) + + assert [%{title: "fred"}] = + Post + |> Ash.Query.sort(:title) + |> Ash.read!() + end + end +end diff --git a/test/calculation_test.exs b/test/calculation_test.exs new file mode 100644 index 0000000..14a13ac --- /dev/null +++ b/test/calculation_test.exs @@ -0,0 +1,274 @@ +defmodule AshSqlite.CalculationTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Account, Author, Comment, Post, User} + + require Ash.Query + + test "calculations can refer to embedded attributes" do + author = + Author + |> Ash.Changeset.for_create(:create, %{bio: %{title: "Mr.", bio: "Bones"}}) + |> Ash.create!() + + assert %{title: "Mr."} = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:title) + |> Ash.read_one!() + end + + test "calculations can use the || operator" do + author = + Author + |> Ash.Changeset.for_create(:create, %{bio: %{title: "Mr.", bio: "Bones"}}) + |> Ash.create!() + + assert %{first_name_or_bob: "bob"} = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:first_name_or_bob) + |> Ash.read_one!() + end + + test "calculations can use the && operator" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "fred", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Ash.create!() + + assert %{first_name_and_bob: "bob"} = + Author + |> Ash.Query.filter(id == ^author.id) + |> Ash.Query.load(:first_name_and_bob) + |> Ash.read_one!() + end + + test "concat calculation can be filtered on" do + author = + Author + |> Ash.Changeset.for_create(:create, %{first_name: "is", last_name: "match"}) + |> Ash.create!() + + Author + |> Ash.Changeset.for_create(:create, %{first_name: "not", last_name: "match"}) + |> Ash.create!() + + author_id = author.id + + assert %{id: ^author_id} = + Author + |> Ash.Query.load(:full_name) + |> Ash.Query.filter(full_name == "is match") + |> Ash.read_one!() + end + + test "conditional calculations can be filtered on" do + author = + Author + |> Ash.Changeset.for_create(:create, %{first_name: "tom"}) + |> Ash.create!() + + Author + |> Ash.Changeset.for_create(:create, %{first_name: "tom", last_name: "holland"}) + |> Ash.create!() + + author_id = author.id + + assert %{id: ^author_id} = + Author + |> Ash.Query.load([:conditional_full_name, :full_name]) + |> Ash.Query.filter(conditional_full_name == "(none)") + |> Ash.read_one!() + end + + test "parameterized calculations can be filtered on" do + Author + |> Ash.Changeset.for_create(:create, %{first_name: "tom", last_name: "holland"}) + |> Ash.create!() + + assert %{param_full_name: "tom holland"} = + Author + |> Ash.Query.load(:param_full_name) + |> Ash.read_one!() + + assert %{param_full_name: "tom~holland"} = + Author + |> 
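+ # same calculation, now loaded with an explicit argument; the default
+ # separator is inferred to be a single space from "tom holland" above.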
Ash.Query.load(param_full_name: [separator: "~"]) + |> Ash.read_one!() + + assert %{} = + Author + |> Ash.Query.filter(param_full_name(separator: "~") == "tom~holland") + |> Ash.read_one!() + end + + test "parameterized related calculations can be filtered on" do + author = + Author + |> Ash.Changeset.for_create(:create, %{first_name: "tom", last_name: "holland"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Ash.create!() + + assert %{title: "match"} = + Comment + |> Ash.Query.filter(author.param_full_name(separator: "~") == "tom~holland") + |> Ash.read_one!() + + assert %{title: "match"} = + Comment + |> Ash.Query.filter( + author.param_full_name(separator: "~") == "tom~holland" and + author.param_full_name(separator: " ") == "tom holland" + ) + |> Ash.read_one!() + end + + test "parameterized calculations can be sorted on" do + Author + |> Ash.Changeset.for_create(:create, %{first_name: "tom", last_name: "holland"}) + |> Ash.create!() + + Author + |> Ash.Changeset.for_create(:create, %{first_name: "abc", last_name: "def"}) + |> Ash.create!() + + assert [%{first_name: "abc"}, %{first_name: "tom"}] = + Author + |> Ash.Query.sort(param_full_name: [separator: "~"]) + |> Ash.read!() + end + + test "calculations using if and literal boolean results can run" do + Post + |> Ash.Query.load(:was_created_in_the_last_month) + |> Ash.Query.filter(was_created_in_the_last_month == true) + |> Ash.read!() + end + + test "nested conditional calculations can be loaded" do + Author + |> Ash.Changeset.for_create(:create, %{last_name: "holland"}) + |> Ash.create!() + + Author + |> Ash.Changeset.for_create(:create, %{first_name: "tom"}) + |> Ash.create!() + + assert [%{nested_conditional: "No First Name"}, %{nested_conditional: "No Last Name"}] = + Author + |> Ash.Query.load(:nested_conditional) + |> Ash.Query.sort(:nested_conditional) + |> Ash.read!() + end + + test "loading a calculation loads its dependent loads" do + user = + User + |> Ash.Changeset.for_create(:create, %{is_active: true}) + |> Ash.create!() + + account = + Account + |> Ash.Changeset.for_create(:create, %{is_active: true}) + |> Ash.Changeset.manage_relationship(:user, user, type: :append_and_remove) + |> Ash.create!() + |> Ash.load!([:active]) + + assert account.active + end + + describe "-/1" do + test "makes numbers negative" do + Post + |> Ash.Changeset.for_create(:create, %{title: "match", score: 42}) + |> Ash.create!() + + assert [%{negative_score: -42}] = + Post + |> Ash.Query.load(:negative_score) + |> Ash.read!() + end + end + + describe "maps" do + test "maps can be constructed" do + Post + |> Ash.Changeset.for_create(:create, %{title: "match", score: 42}) + |> Ash.create!() + + assert [%{score_map: %{negative_score: %{foo: -42}}}] = + Post + |> Ash.Query.load(:score_map) + |> Ash.read!() + end + end + + test "dependent calc" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "match", price: 10_024}) + |> Ash.create!() + + Post.get_by_id(post.id, + query: Post |> Ash.Query.select([:id]) |> Ash.Query.load([:price_string_with_currency_sign]) + ) + end + + test "nested get_path works" do + assert "thing" = + Post + |> Ash.Changeset.for_create(:create, %{ + title: "match", + price: 10_024, + stuff: %{foo: %{bar: "thing"}} + }) + |> Ash.Changeset.deselect(:stuff) + |> Ash.create!() + |> Ash.load!(:foo_bar_from_stuff) + |> Map.get(:foo_bar_from_stuff) + end + + test "contains uses instr" do + 
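+ # SQLite has no contains/2, so (per this test's name) the expression
+ # below is expected to compile to SQLite's instr/2, roughly:
+ #
+ #   SELECT p0.* FROM posts AS p0 WHERE instr(p0.title, '-dude-') > 0
+ #
+ # Illustrative SQL only; the exact query AshSqlite emits may differ.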
Post + |> Ash.Changeset.for_create(:create, %{ + title: "foo-dude-bar" + }) + |> Ash.create!() + + assert Post + |> Ash.Query.filter(contains(title, "-dude-")) + |> Ash.read_one!() + end + + test "runtime expression calcs" do + author = + Author + |> Ash.Changeset.for_create(:create, %{ + first_name: "Bill", + last_name: "Jones", + bio: %{title: "Mr.", bio: "Bones"} + }) + |> Ash.create!() + + assert %AshSqlite.Test.Money{} = + Post + |> Ash.Changeset.for_create(:create, %{title: "match", price: 10_024}) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Ash.create!() + |> Ash.load!(:calc_returning_json) + |> Map.get(:calc_returning_json) + + assert [%AshSqlite.Test.Money{}] = + author + |> Ash.load!(posts: :calc_returning_json) + |> Map.get(:posts) + |> Enum.map(&Map.get(&1, :calc_returning_json)) + end +end diff --git a/test/custom_index_test.exs b/test/custom_index_test.exs new file mode 100644 index 0000000..d45b0f4 --- /dev/null +++ b/test/custom_index_test.exs @@ -0,0 +1,28 @@ +defmodule AshSqlite.Test.CustomIndexTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + require Ash.Query + + test "unique constraint errors are properly caught" do + Post + |> Ash.Changeset.for_create(:create, %{ + title: "first", + uniq_custom_one: "what", + uniq_custom_two: "what2" + }) + |> Ash.create!() + + assert_raise Ash.Error.Invalid, + ~r/Invalid value provided for uniq_custom_one: dude what the heck/, + fn -> + Post + |> Ash.Changeset.for_create(:create, %{ + title: "first", + uniq_custom_one: "what", + uniq_custom_two: "what2" + }) + |> Ash.create!() + end + end +end diff --git a/test/ecto_compatibility_test.exs b/test/ecto_compatibility_test.exs new file mode 100644 index 0000000..e47f805 --- /dev/null +++ b/test/ecto_compatibility_test.exs @@ -0,0 +1,15 @@ +defmodule AshSqlite.EctoCompatibilityTest do + use AshSqlite.RepoCase, async: false + require Ash.Query + + test "call Ecto.Repo.insert! 
via Ash Repo" do + org = + %AshSqlite.Test.Organization{ + id: Ash.UUID.generate(), + name: "The Org" + } + |> AshSqlite.TestRepo.insert!() + + assert org.name == "The Org" + end +end diff --git a/test/embeddable_resource_test.exs b/test/embeddable_resource_test.exs new file mode 100644 index 0000000..8bb95c3 --- /dev/null +++ b/test/embeddable_resource_test.exs @@ -0,0 +1,34 @@ +defmodule AshSqlite.EmbeddableResourceTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Author, Bio, Post} + + require Ash.Query + + setup do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + %{post: post} + end + + test "calculations can load json", %{post: post} do + assert %{calc_returning_json: %AshSqlite.Test.Money{amount: 100, currency: :usd}} = + Ash.load!(post, :calc_returning_json) + end + + test "embeds with list attributes set to nil are loaded as nil" do + post = + Author + |> Ash.Changeset.for_create(:create, %{bio: %Bio{list_of_strings: nil}}) + |> Ash.create!() + + assert is_nil(post.bio.list_of_strings) + + post = Ash.reload!(post) + + assert is_nil(post.bio.list_of_strings) + end +end diff --git a/test/enum_test.exs b/test/enum_test.exs new file mode 100644 index 0000000..a0cff4b --- /dev/null +++ b/test/enum_test.exs @@ -0,0 +1,13 @@ +defmodule AshSqlite.EnumTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + require Ash.Query + + test "valid values are properly inserted" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title", status: :open}) + |> Ash.create!() + end +end diff --git a/test/filter_test.exs b/test/filter_test.exs new file mode 100644 index 0000000..6fc8095 --- /dev/null +++ b/test/filter_test.exs @@ -0,0 +1,655 @@ +defmodule AshSqlite.FilterTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Author, Comment, Post} + + require Ash.Query + + describe "with no filter applied" do + test "with no data" do + assert [] = Ash.read!(Post) + end + + test "with data" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + assert [%Post{title: "title"}] = Ash.read!(Post) + end + end + + describe "invalid uuid" do + test "with an invalid uuid, an invalid error is raised" do + assert_raise Ash.Error.Invalid, fn -> + Post + |> Ash.Query.filter(id == "foo") + |> Ash.read!() + end + end + end + + describe "with a simple filter applied" do + test "with no data" do + results = + Post + |> Ash.Query.filter(title == "title") + |> Ash.read!() + + assert [] = results + end + + test "with data that matches" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "title") + |> Ash.read!() + + assert [%Post{title: "title"}] = results + end + + test "with some data that matches and some data that doesnt" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "no_title") + |> Ash.read!() + + assert [] = results + end + + test "with related data that doesn't match" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "not match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(comments.title == "match") + |> Ash.read!() + + assert [] = results + end + + test 
"with related data two steps away that matches" do + author = + Author + |> Ash.Changeset.for_create(:create, %{first_name: "match"}) + |> Ash.create!() + + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "not match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.Changeset.manage_relationship(:author, author, type: :append_and_remove) + |> Ash.create!() + + results = + Comment + |> Ash.Query.filter(author.posts.linked_posts.title == "title") + |> Ash.read!() + + assert [_] = results + end + + test "with related data that does match" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(comments.title == "match") + |> Ash.read!() + + assert [%Post{title: "title"}] = results + end + + test "with related data that does and doesn't match" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "not match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(comments.title == "match") + |> Ash.read!() + + assert [%Post{title: "title"}] = results + end + end + + describe "in" do + test "it properly filters" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "title1"}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.create!() + + assert [%Post{title: "title1"}, %Post{title: "title2"}] = + Post + |> Ash.Query.filter(title in ["title1", "title2"]) + |> Ash.Query.sort(title: :asc) + |> Ash.read!() + end + end + + describe "with a boolean filter applied" do + test "with no data" do + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Ash.read!() + + assert [] = results + end + + test "with data that doesn't match" do + Post + |> Ash.Changeset.for_create(:create, %{title: "no title", score: 2}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Ash.read!() + + assert [] = results + end + + test "with data that matches both conditions" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title", score: 0}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{score: 1, title: "nothing"}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Ash.read!() + |> Enum.sort_by(& &1.score) + + assert [%Post{title: "title", score: 0}, %Post{title: "nothing", score: 1}] = results + end + + test "with data that matches one condition and data that matches nothing" do + 
Post + |> Ash.Changeset.for_create(:create, %{title: "title", score: 0}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{score: 2, title: "nothing"}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "title" or score == 1) + |> Ash.read!() + |> Enum.sort_by(& &1.score) + + assert [%Post{title: "title", score: 0}] = results + end + + test "with related data in an or statement that matches, while basic filter doesn't match" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "doesn't match"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "match" or comments.title == "match") + |> Ash.read!() + + assert [%Post{title: "doesn't match"}] = results + end + + test "with related data in an or statement that doesn't match, while basic filter does match" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "doesn't match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == "match" or comments.title == "match") + |> Ash.read!() + + assert [%Post{title: "match"}] = results + end + + test "with related data and an inner join condition" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(title == comments.title) + |> Ash.read!() + + assert [%Post{title: "match"}] = results + + results = + Post + |> Ash.Query.filter(title != comments.title) + |> Ash.read!() + + assert [] = results + end + end + + describe "accessing embeds" do + setup do + Author + |> Ash.Changeset.for_create(:create, + bio: %{title: "Dr.", bio: "Strange", years_of_experience: 10} + ) + |> Ash.create!() + + Author + |> Ash.Changeset.for_create(:create, + bio: %{title: "Highlander", bio: "There can be only one."} + ) + |> Ash.create!() + + :ok + end + + test "works using simple equality" do + assert [%{bio: %{title: "Dr."}}] = + Author + |> Ash.Query.filter(bio[:title] == "Dr.") + |> Ash.read!() + end + + test "works using simple equality for integers" do + assert [%{bio: %{title: "Dr."}}] = + Author + |> Ash.Query.filter(bio[:years_of_experience] == 10) + |> Ash.read!() + end + + test "calculations that use embeds can be filtered on" do + assert [%{bio: %{title: "Dr."}}] = + Author + |> Ash.Query.filter(title == "Dr.") + |> Ash.read!() + end + end + + describe "basic expressions" do + test "basic expressions work" do + Post + |> Ash.Changeset.for_create(:create, %{title: "match", score: 4}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "non_match", score: 2}) + |> Ash.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(score + 1 == 5) + |> Ash.read!() + end + end + + describe "case insensitive fields" do + test "it matches case insensitively" do + Post + |> Ash.Changeset.for_create(:create, %{title: "match", category: "FoObAr"}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{category: "bazbuz"}) + |> Ash.create!() + + assert [%{title: "match"}] = + Post + |> 
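+ # category is declared as citext in the posts snapshot, but the
+ # migration stores it as plain :text (note the commented-out
+ # `#add :category, :citext` there), so the case-insensitive match here
+ # presumably comes from the generated query or column collation rather
+ # than the column type.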
Ash.Query.filter(category == "fOoBaR") + |> Ash.read!() + end + end + + describe "exists/2" do + test "it works with single relationships" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "abba"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + post2 = + Post + |> Ash.Changeset.for_create(:create, %{title: "no_match"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "acca"}) + |> Ash.Changeset.manage_relationship(:post, post2, type: :append_and_remove) + |> Ash.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(exists(comments, title == ^"abba")) + |> Ash.read!() + end + + test "it works with many to many relationships" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "a"}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts, title == ^"a")) + |> Ash.read!() + end + + test "it works with join association relationships" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "a"}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts, title == ^"a")) + |> Ash.read!() + end + + test "it works with nested relationships as the path" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "a"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts.comments, title == ^"comment")) + |> Ash.read!() + end + + test "it works with an `at_path`" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "a"}) + |> Ash.create!() + + other_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "other_a"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, other_post, type: :append_and_remove) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [other_post], type: :append_and_remove) + |> Ash.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter( + linked_posts.title == "a" and + linked_posts.exists(comments, title == ^"comment") + ) + |> Ash.read!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter( + linked_posts.title == "a" and + linked_posts.exists(comments, title == ^"comment") + ) + |> Ash.read!() + end + + test "it 
works with nested relationships inside of exists" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "a"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "comment"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "b"}) + |> Ash.Changeset.manage_relationship(:linked_posts, [post], type: :append_and_remove) + |> Ash.create!() + + assert [%{title: "b"}] = + Post + |> Ash.Query.filter(exists(linked_posts, comments.title == ^"comment")) + |> Ash.read!() + end + end + + describe "filtering on enum types" do + test "it allows simple filtering" do + Post + |> Ash.Changeset.for_create(:create, status_enum: "open") + |> Ash.create!() + + assert %{status_enum: :open} = + Post + |> Ash.Query.filter(status_enum == ^"open") + |> Ash.read_one!() + end + + test "it allows simple filtering without casting" do + Post + |> Ash.Changeset.for_create(:create, status_enum_no_cast: "open") + |> Ash.create!() + + assert %{status_enum_no_cast: :open} = + Post + |> Ash.Query.filter(status_enum_no_cast == ^"open") + |> Ash.read_one!() + end + end + + describe "atom filters" do + test "it works on matches" do + Post + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.create!() + + result = + Post + |> Ash.Query.filter(type == :sponsored) + |> Ash.read!() + + assert [%Post{title: "match"}] = result + end + end + + describe "like" do + test "like builds and matches" do + Post + |> Ash.Changeset.for_create(:create, %{title: "MaTcH"}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(like(title, "%aTc%")) + |> Ash.read!() + + assert [%Post{title: "MaTcH"}] = results + + results = + Post + |> Ash.Query.filter(like(title, "%atc%")) + |> Ash.read!() + + assert [] = results + end + end + + describe "ilike" do + test "ilike builds and matches" do + Post + |> Ash.Changeset.for_create(:create, %{title: "MaTcH"}) + |> Ash.create!() + + results = + Post + |> Ash.Query.filter(ilike(title, "%aTc%")) + |> Ash.read!() + + assert [%Post{title: "MaTcH"}] = results + + results = + Post + |> Ash.Query.filter(ilike(title, "%atc%")) + |> Ash.read!() + + assert [%Post{title: "MaTcH"}] = results + end + end + + describe "fragments" do + test "double replacement works" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "match", score: 4}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "non_match", score: 2}) + |> Ash.create!() + + assert [%{title: "match"}] = + Post + |> Ash.Query.filter(fragment("? = ?", title, ^post.title)) + |> Ash.read!() + + assert [] = + Post + |> Ash.Query.filter(fragment("? 
= ?", title, "nope")) + |> Ash.read!() + end + end + + describe "filtering on relationships that themselves have filters" do + test "it doesn't raise an error" do + Comment + |> Ash.Query.filter(not is_nil(popular_ratings.id)) + |> Ash.read!() + end + + test "it doesn't raise an error when nested" do + Post + |> Ash.Query.filter(not is_nil(comments.popular_ratings.id)) + |> Ash.read!() + end + end +end diff --git a/test/load_test.exs b/test/load_test.exs new file mode 100644 index 0000000..495636d --- /dev/null +++ b/test/load_test.exs @@ -0,0 +1,247 @@ +defmodule AshSqlite.Test.LoadTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Comment, Post} + + require Ash.Query + + test "has_many relationships can be loaded" do + assert %Post{comments: %Ash.NotLoaded{type: :relationship}} = + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + results = + Post + |> Ash.Query.load(:comments) + |> Ash.read!() + + assert [%Post{comments: [%{title: "match"}]}] = results + end + + test "belongs_to relationships can be loaded" do + assert %Comment{post: %Ash.NotLoaded{type: :relationship}} = + comment = + Comment + |> Ash.Changeset.for_create(:create, %{}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "match"}) + |> Ash.Changeset.manage_relationship(:comments, [comment], type: :append_and_remove) + |> Ash.create!() + + results = + Comment + |> Ash.Query.load(:post) + |> Ash.read!() + + assert [%Comment{post: %{title: "match"}}] = results + end + + test "many_to_many loads work" do + source_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "source"}) + |> Ash.create!() + + destination_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "destination"}) + |> Ash.create!() + + destination_post2 = + Post + |> Ash.Changeset.for_create(:create, %{title: "destination"}) + |> Ash.create!() + + source_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [destination_post, destination_post2], + type: :append_and_remove + ) + |> Ash.update!() + + results = + source_post + |> Ash.load!(:linked_posts) + + assert %{linked_posts: [%{title: "destination"}, %{title: "destination"}]} = results + end + + test "many_to_many loads work when nested" do + source_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "source"}) + |> Ash.create!() + + destination_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "destination"}) + |> Ash.create!() + + source_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [destination_post], + type: :append_and_remove + ) + |> Ash.update!() + + destination_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [source_post], type: :append_and_remove) + |> Ash.update!() + + results = + source_post + |> Ash.load!(linked_posts: :linked_posts) + + assert %{linked_posts: [%{title: "destination", linked_posts: [%{title: "source"}]}]} = + results + end + + describe "lateral join loads" do + # uncomment when lateral join is supported + # it does not necessarily have to be implemented *exactly* as lateral join + # test "parent references are resolved" do + # post1 = + # Post + # |> Ash.Changeset.new(%{title: "title"}) + # |> Api.create!() + + # post2 = + # Post + # |> Ash.Changeset.new(%{title: "title"}) + # |> 
Api.create!() + + # post2_id = post2.id + + # post3 = + # Post + # |> Ash.Changeset.new(%{title: "no match"}) + # |> Api.create!() + + # assert [%{posts_with_matching_title: [%{id: ^post2_id}]}] = + # Post + # |> Ash.Query.load(:posts_with_matching_title) + # |> Ash.Query.filter(id == ^post1.id) + # |> Api.read!() + + # assert [%{posts_with_matching_title: []}] = + # Post + # |> Ash.Query.load(:posts_with_matching_title) + # |> Ash.Query.filter(id == ^post3.id) + # |> Api.read!() + # end + + # test "parent references work when joining for filters" do + # %{id: post1_id} = + # Post + # |> Ash.Changeset.new(%{title: "title"}) + # |> Api.create!() + + # post2 = + # Post + # |> Ash.Changeset.new(%{title: "title"}) + # |> Api.create!() + + # Post + # |> Ash.Changeset.new(%{title: "no match"}) + # |> Api.create!() + + # Post + # |> Ash.Changeset.new(%{title: "no match"}) + # |> Api.create!() + + # assert [%{id: ^post1_id}] = + # Post + # |> Ash.Query.filter(posts_with_matching_title.id == ^post2.id) + # |> Api.read!() + # end + + # test "lateral join loads (loads with limits or offsets) are supported" do + # assert %Post{comments: %Ash.NotLoaded{type: :relationship}} = + # post = + # Post + # |> Ash.Changeset.new(%{title: "title"}) + # |> Api.create!() + + # Comment + # |> Ash.Changeset.new(%{title: "abc"}) + # |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + # |> Api.create!() + + # Comment + # |> Ash.Changeset.new(%{title: "def"}) + # |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + # |> Api.create!() + + # comments_query = + # Comment + # |> Ash.Query.limit(1) + # |> Ash.Query.sort(:title) + + # results = + # Post + # |> Ash.Query.load(comments: comments_query) + # |> Api.read!() + + # assert [%Post{comments: [%{title: "abc"}]}] = results + + # comments_query = + # Comment + # |> Ash.Query.limit(1) + # |> Ash.Query.sort(title: :desc) + + # results = + # Post + # |> Ash.Query.load(comments: comments_query) + # |> Api.read!() + + # assert [%Post{comments: [%{title: "def"}]}] = results + + # comments_query = + # Comment + # |> Ash.Query.limit(2) + # |> Ash.Query.sort(title: :desc) + + # results = + # Post + # |> Ash.Query.load(comments: comments_query) + # |> Api.read!() + + # assert [%Post{comments: [%{title: "def"}, %{title: "abc"}]}] = results + # end + + test "loading many to many relationships on records works without loading its join relationship when using code interface" do + source_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "source"}) + |> Ash.create!() + + destination_post = + Post + |> Ash.Changeset.for_create(:create, %{title: "abc"}) + |> Ash.create!() + + destination_post2 = + Post + |> Ash.Changeset.for_create(:create, %{title: "def"}) + |> Ash.create!() + + source_post + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:linked_posts, [destination_post, destination_post2], + type: :append_and_remove + ) + |> Ash.update!() + + assert %{linked_posts: [_, _]} = Post.get_by_id!(source_post.id, load: [:linked_posts]) + end + end +end diff --git a/test/manual_relationships_test.exs b/test/manual_relationships_test.exs new file mode 100644 index 0000000..5eaafe5 --- /dev/null +++ b/test/manual_relationships_test.exs @@ -0,0 +1,116 @@ +defmodule AshSqlite.Test.ManualRelationshipsTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Comment, Post} + + require Ash.Query + + describe "manual first" do + test "relationships can be filtered on with no data" do + Post + |> 
Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + assert [] = + Post |> Ash.Query.filter(comments_containing_title.title == "title") |> Ash.read!() + end + + test "relationships can be filtered on with data" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + assert [_] = + Post + |> Ash.Query.filter(comments_containing_title.title == "title2") + |> Ash.read!() + end + end + + describe "manual last" do + test "relationships can be filtered on with no data" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + assert [] = + Comment + |> Ash.Query.filter(post.comments_containing_title.title == "title2") + |> Ash.read!() + end + + test "relationships can be filtered on with data" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + assert [_, _] = + Comment + |> Ash.Query.filter(post.comments_containing_title.title == "title2") + |> Ash.read!() + end + end + + describe "manual middle" do + test "relationships can be filtered on with data" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "title2"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "no match"}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + assert [_, _] = + Comment + |> Ash.Query.filter(post.comments_containing_title.post.title == "title") + |> Ash.read!() + end + end +end diff --git a/test/migration_generator_test.exs b/test/migration_generator_test.exs new file mode 100644 index 0000000..ee21de0 --- /dev/null +++ b/test/migration_generator_test.exs @@ -0,0 +1,816 @@ +defmodule AshSqlite.MigrationGeneratorTest do + use AshSqlite.RepoCase, async: false + @moduletag :migration + + import ExUnit.CaptureLog + + defmacrop defposts(mod \\ Post, do: body) do + quote do + Code.compiler_options(ignore_module_conflict: true) + + defmodule unquote(mod) do + use Ash.Resource, + domain: nil, + data_layer: AshSqlite.DataLayer + + sqlite do + table "posts" + repo(AshSqlite.TestRepo) + + custom_indexes do + # need one without any opts + index(["id"]) + index(["id"], unique: true, name: "test_unique_index") + end + end + 
+        actions do
+          defaults([:create, :read, :update, :destroy])
+        end
+
+        unquote(body)
+      end
+
+      Code.compiler_options(ignore_module_conflict: false)
+    end
+  end
+
+  defmacrop defdomain(resources) do
+    quote do
+      Code.compiler_options(ignore_module_conflict: true)
+
+      defmodule Domain do
+        use Ash.Domain
+
+        resources do
+          for resource <- unquote(resources) do
+            resource(resource)
+          end
+        end
+      end
+
+      Code.compiler_options(ignore_module_conflict: false)
+    end
+  end
+
+  describe "creating initial snapshots" do
+    setup do
+      on_exit(fn ->
+        File.rm_rf!("test_snapshots_path")
+        File.rm_rf!("test_migration_path")
+      end)
+
+      defposts do
+        sqlite do
+          migration_types(second_title: {:varchar, 16})
+          migration_defaults(title_with_default: "\"fred\"")
+        end
+
+        identities do
+          identity(:title, [:title])
+          identity(:thing, [:title, :second_title])
+          identity(:thing_with_source, [:title, :title_with_source])
+        end
+
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:title, :string)
+          attribute(:second_title, :string)
+          attribute(:title_with_source, :string, source: :t_w_s)
+          attribute(:title_with_default, :string)
+          attribute(:email, Test.Support.Types.Email)
+        end
+      end
+
+      defdomain([Post])
+
+      Mix.shell(Mix.Shell.Process)
+
+      AshSqlite.MigrationGenerator.generate(Domain,
+        snapshot_path: "test_snapshots_path",
+        migration_path: "test_migration_path",
+        quiet: true,
+        format: false
+      )
+
+      :ok
+    end
+
+    test "the migration sets up resources correctly" do
+      # the snapshot exists and contains valid json
+      assert File.read!(Path.wildcard("test_snapshots_path/test_repo/posts/*.json"))
+             |> Jason.decode!(keys: :atoms!)
+
+      assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")
+
+      file_contents = File.read!(file)
+
+      # the migration creates the table
+      assert file_contents =~ "create table(:posts, primary_key: false) do"
+
+      # the migration sets up the custom_indexes
+      assert file_contents =~
+               ~S{create index(:posts, ["id"], name: "test_unique_index", unique: true)}
+
+      assert file_contents =~ ~S{create index(:posts, ["id"]}
+
+      # the migration adds the id as the primary key
+      assert file_contents =~
+               ~S[add :id, :uuid, null: false, primary_key: true]
+
+      # the migration adds title_with_default, with its configured default
+      assert file_contents =~
+               ~S[add :title_with_default, :text, default: "fred"]
+
+      # the migration adds other attributes
+      assert file_contents =~ ~S[add :title, :text]
+
+      # the migration unwraps newtypes
+      assert file_contents =~ ~S[add :email, :text]
+
+      # the migration adds custom attributes
+      assert file_contents =~ ~S[add :second_title, :varchar, size: 16]
+
+      # the migration creates unique_indexes based on the identities of the resource
+      assert file_contents =~ ~S{create unique_index(:posts, [:title], name: "posts_title_index")}
+
+      # the migration creates unique_indexes based on the identities of the resource
+      assert file_contents =~
+               ~S{create unique_index(:posts, [:title, :second_title], name: "posts_thing_index")}
+
+      # the migration creates unique_indexes using the `source` on the attributes of the identity on the resource
+      assert file_contents =~
+               ~S{create unique_index(:posts, [:title, :t_w_s], name: "posts_thing_with_source_index")}
+    end
+  end
+
+  describe "creating follow up migrations" do
+    setup do
+      on_exit(fn ->
+        File.rm_rf!("test_snapshots_path")
+        File.rm_rf!("test_migration_path")
+      end)
+
+      defposts do
+        identities do
+          identity(:title, [:title])
+        end
+
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:title, :string)
+        end
+      end
+
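+      # This setup generates the baseline snapshot and first migration; each
+      # test below then changes the resource, generates a follow-up migration,
+      # and asserts on the contents of that second file.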
defdomain([Post]) + + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "when renaming an index, it is properly renamed" do + defposts do + sqlite do + identity_index_names(title: "titles_r_unique_dawg") + end + + identities do + identity(:title, [:title]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defdomain([Post]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[ALTER INDEX posts_title_index RENAME TO titles_r_unique_dawg] + end + + test "when adding a field, it adds the field" do + defposts do + identities do + identity(:title, [:title]) + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:name, :string, allow_nil?: false) + end + end + + defdomain([Post]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :name, :text, null: false] + end + + test "when renaming a field, it asks if you are renaming it, and renames it if you are" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + end + end + + defdomain([Post]) + + send(self(), {:mix_shell_input, :yes?, true}) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ ~S[rename table(:posts), :title, to: :name] + end + + test "when renaming a field, it asks if you are renaming it, and adds it if you aren't" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + end + end + + defdomain([Post]) + + send(self(), {:mix_shell_input, :yes?, false}) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + assert File.read!(file2) =~ + ~S[add :name, :text, null: false] + end + + test "when renaming a field, it asks which field you are renaming it to, and renames it if you are" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:name, :string, allow_nil?: false) + attribute(:subject, :string, allow_nil?: false) + end + end + + defdomain([Post]) + + send(self(), {:mix_shell_input, :yes?, true}) + send(self(), {:mix_shell_input, :prompt, "subject"}) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + # Up migration + assert File.read!(file2) =~ ~S[rename table(:posts), :title, to: 
:subject]
+
+      # Down migration
+      assert File.read!(file2) =~ ~S[rename table(:posts), :subject, to: :title]
+    end
+
+    test "when renaming a field, it asks which field you are renaming it to, and adds it if you aren't" do
+      defposts do
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:name, :string, allow_nil?: false)
+          attribute(:subject, :string, allow_nil?: false)
+        end
+      end
+
+      defdomain([Post])
+
+      send(self(), {:mix_shell_input, :yes?, false})
+
+      AshSqlite.MigrationGenerator.generate(Domain,
+        snapshot_path: "test_snapshots_path",
+        migration_path: "test_migration_path",
+        quiet: true,
+        format: false
+      )
+
+      assert [_file1, file2] =
+               Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs"))
+
+      assert File.read!(file2) =~
+               ~S[add :subject, :text, null: false]
+    end
+
+    test "when an attribute exists only on some of the resources that use the same table, it isn't marked as null: false" do
+      defposts do
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:title, :string)
+          attribute(:example, :string, allow_nil?: false)
+        end
+      end
+
+      defposts Post2 do
+        attributes do
+          uuid_primary_key(:id)
+        end
+      end
+
+      defdomain([Post, Post2])
+
+      AshSqlite.MigrationGenerator.generate(Domain,
+        snapshot_path: "test_snapshots_path",
+        migration_path: "test_migration_path",
+        quiet: true,
+        format: false
+      )
+
+      assert [_file1, file2] =
+               Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs"))
+
+      assert File.read!(file2) =~
+               ~S[add :example, :text] <> "\n"
+
+      refute File.read!(file2) =~ ~S[null: false]
+    end
+  end
+
+  describe "auto-incrementing integer, when generated" do
+    setup do
+      on_exit(fn ->
+        File.rm_rf!("test_snapshots_path")
+        File.rm_rf!("test_migration_path")
+      end)
+
+      defposts do
+        attributes do
+          attribute(:id, :integer, generated?: true, allow_nil?: false, primary_key?: true)
+          attribute(:views, :integer)
+        end
+      end
+
+      defdomain([Post])
+
+      Mix.shell(Mix.Shell.Process)
+
+      AshSqlite.MigrationGenerator.generate(Domain,
+        snapshot_path: "test_snapshots_path",
+        migration_path: "test_migration_path",
+        quiet: true,
+        format: false
+      )
+
+      :ok
+    end
+
+    test "when an integer is generated and defaults to nil, it is a bigserial" do
+      assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")
+
+      assert File.read!(file) =~
+               ~S[add :id, :bigserial, null: false, primary_key: true]
+
+      assert File.read!(file) =~
+               ~S[add :views, :bigint]
+    end
+  end
+
+  describe "--check option" do
+    setup do
+      defposts do
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:title, :string)
+        end
+      end
+
+      defdomain([Post])
+
+      [domain: Domain]
+    end
+
+    test "exits with code 1 if snapshots and resources don't match", %{domain: domain} do
+      assert catch_exit(
+               AshSqlite.MigrationGenerator.generate(domain,
+                 snapshot_path: "test_snapshot_path",
+                 migration_path: "test_migration_path",
+                 check: true
+               )
+             ) == {:shutdown, 1}
+
+      refute File.exists?(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs"))
+      refute File.exists?(Path.wildcard("test_snapshot_path/test_repo/posts/*.json"))
+    end
+  end
+
+  describe "references" do
+    setup do
+      on_exit(fn ->
+        File.rm_rf!("test_snapshots_path")
+        File.rm_rf!("test_migration_path")
+      end)
+    end
+
+    test "references are inferred automatically" do
+      defposts do
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:title, :string)
+          attribute(:foobar, :string)
+        end
+      end
+
+      defposts Post2 do
+        attributes do
+          uuid_primary_key(:id)
+          attribute(:name, :string)
+        end
+
+        relationships do
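+          # No reference options are configured on this belongs_to, so the
+          # generator has to infer the referenced table, constraint name, and
+          # column type on its own.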
belongs_to(:post, Post) + end + end + + defdomain([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[references(:posts, column: :id, name: "posts_post_id_fkey", type: :uuid)] + end + + test "references are inferred automatically if the attribute has a different type" do + defposts do + attributes do + attribute(:id, :string, primary_key?: true, allow_nil?: false) + attribute(:title, :string) + attribute(:foobar, :string) + end + end + + defposts Post2 do + attributes do + attribute(:id, :string, primary_key?: true, allow_nil?: false) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post, attribute_type: :string) + end + end + + defdomain([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[references(:posts, column: :id, name: "posts_post_id_fkey", type: :text)] + end + + test "when modified, the foreign key is dropped before modification" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + attribute(:foobar, :string) + end + end + + defposts Post2 do + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post) + end + end + + defdomain([Post, Post2]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + defposts Post2 do + sqlite do + references do + reference(:post, name: "special_post_fkey", on_delete: :delete, on_update: :update) + end + end + + attributes do + uuid_primary_key(:id) + attribute(:name, :string) + end + + relationships do + belongs_to(:post, Post) + end + end + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert file = + "test_migration_path/**/*_migrate_resources*.exs" + |> Path.wildcard() + |> Enum.sort() + |> Enum.at(1) + |> File.read!() + + assert file =~ + ~S[references(:posts, column: :id, name: "special_post_fkey", type: :uuid, on_delete: :delete_all, on_update: :update_all)] + + assert file =~ ~S[drop constraint(:posts, "posts_post_id_fkey")] + + assert [_, down_code] = String.split(file, "def down do") + + assert [_, after_drop] = + String.split(down_code, "drop constraint(:posts, \"special_post_fkey\")") + + assert after_drop =~ ~S[references(:posts] + end + end + + describe "polymorphic resources" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defmodule Comment do + use Ash.Resource, + domain: nil, + data_layer: AshSqlite.DataLayer + + sqlite do + polymorphic?(true) + repo(AshSqlite.TestRepo) + end + + attributes do + uuid_primary_key(:id) + attribute(:resource_id, :uuid) + end + + actions do + defaults([:create, :read, :update, :destroy]) + end + end + + defmodule Post do + use Ash.Resource, + domain: nil, + data_layer: AshSqlite.DataLayer + + sqlite do + table "posts" + repo(AshSqlite.TestRepo) + end + + actions do + defaults([:create, :read, 
:update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + end + + relationships do + has_many(:comments, Comment, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "post_comments"}} + ) + + belongs_to(:best_comment, Comment, + destination_attribute: :id, + relationship_context: %{data_layer: %{table: "post_comments"}} + ) + end + end + + defdomain([Post, Comment]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + [domain: Domain] + end + + test "it uses the relationship's table context if it is set" do + assert [file] = Path.wildcard("test_migration_path/**/*_migrate_resources*.exs") + + assert File.read!(file) =~ + ~S[references(:post_comments, column: :id, name: "posts_best_comment_id_fkey", type: :uuid)] + end + end + + describe "default values" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + end + + test "when default value is specified that has no impl" do + defposts do + attributes do + uuid_primary_key(:id) + attribute(:product_code, :term, default: {"xyz"}) + end + end + + defdomain([Post]) + + capture_log(fn -> + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + end) + + assert [file1] = Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file = File.read!(file1) + + assert file =~ + ~S[add :product_code, :binary] + end + end + + describe "follow up with references" do + setup do + on_exit(fn -> + File.rm_rf!("test_snapshots_path") + File.rm_rf!("test_migration_path") + end) + + defposts do + attributes do + uuid_primary_key(:id) + attribute(:title, :string) + end + end + + defmodule Comment do + use Ash.Resource, + domain: nil, + data_layer: AshSqlite.DataLayer + + sqlite do + table "comments" + repo AshSqlite.TestRepo + end + + attributes do + uuid_primary_key(:id) + end + + relationships do + belongs_to(:post, Post) + end + end + + defdomain([Post, Comment]) + + Mix.shell(Mix.Shell.Process) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + :ok + end + + test "when changing the primary key, it changes properly" do + defposts do + attributes do + attribute(:id, :uuid, primary_key?: false, default: &Ecto.UUID.generate/0) + uuid_primary_key(:guid) + attribute(:title, :string) + end + end + + defmodule Comment do + use Ash.Resource, + domain: nil, + data_layer: AshSqlite.DataLayer + + sqlite do + table "comments" + repo AshSqlite.TestRepo + end + + attributes do + uuid_primary_key(:id) + end + + relationships do + belongs_to(:post, Post) + end + end + + defdomain([Post, Comment]) + + AshSqlite.MigrationGenerator.generate(Domain, + snapshot_path: "test_snapshots_path", + migration_path: "test_migration_path", + quiet: true, + format: false + ) + + assert [_file1, file2] = + Enum.sort(Path.wildcard("test_migration_path/**/*_migrate_resources*.exs")) + + file = File.read!(file2) + + assert [before_index_drop, after_index_drop] = + String.split(file, ~S[drop constraint("posts", "posts_pkey")], parts: 2) + + assert before_index_drop =~ ~S[drop constraint(:comments, "comments_post_id_fkey")] + + assert after_index_drop =~ ~S[modify :id, :uuid, null: true, primary_key: false] + + assert after_index_drop 
=~ + ~S[modify :post_id, references(:posts, column: :id, name: "comments_post_id_fkey", type: :uuid)] + end + end +end diff --git a/test/polymorphism_test.exs b/test/polymorphism_test.exs new file mode 100644 index 0000000..519a0ea --- /dev/null +++ b/test/polymorphism_test.exs @@ -0,0 +1,29 @@ +defmodule AshSqlite.PolymorphismTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Post, Rating} + + require Ash.Query + + test "you can create related data" do + Post + |> Ash.Changeset.for_create(:create, rating: %{score: 10}) + |> Ash.create!() + + assert [%{score: 10}] = + Rating + |> Ash.Query.set_context(%{data_layer: %{table: "post_ratings"}}) + |> Ash.read!() + end + + test "you can read related data" do + Post + |> Ash.Changeset.for_create(:create, rating: %{score: 10}) + |> Ash.create!() + + assert [%{score: 10}] = + Post + |> Ash.Query.load(:ratings) + |> Ash.read_one!() + |> Map.get(:ratings) + end +end diff --git a/test/primary_key_test.exs b/test/primary_key_test.exs new file mode 100644 index 0000000..40b5340 --- /dev/null +++ b/test/primary_key_test.exs @@ -0,0 +1,52 @@ +defmodule AshSqlite.Test.PrimaryKeyTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{IntegerPost, Post, PostView} + + require Ash.Query + + test "creates record with integer primary key" do + assert %IntegerPost{} = + IntegerPost |> Ash.Changeset.for_create(:create, %{title: "title"}) |> Ash.create!() + end + + test "creates record with uuid primary key" do + assert %Post{} = Post |> Ash.Changeset.for_create(:create, %{title: "title"}) |> Ash.create!() + end + + describe "resources without a primary key" do + test "records can be created" do + post = + Post + |> Ash.Changeset.for_action(:create, %{title: "not very interesting"}) + |> Ash.create!() + + assert {:ok, view} = + PostView + |> Ash.Changeset.for_action(:create, %{browser: :firefox, post_id: post.id}) + |> Ash.create() + + assert view.browser == :firefox + assert view.post_id == post.id + assert DateTime.diff(DateTime.utc_now(), view.time, :microsecond) < 1_000_000 + end + + test "records can be queried" do + post = + Post + |> Ash.Changeset.for_action(:create, %{title: "not very interesting"}) + |> Ash.create!() + + expected = + PostView + |> Ash.Changeset.for_action(:create, %{browser: :firefox, post_id: post.id}) + |> Ash.create!() + + assert {:ok, [actual]} = Ash.read(PostView) + + assert actual.time == expected.time + assert actual.browser == expected.browser + assert actual.post_id == expected.post_id + end + end +end diff --git a/test/select_test.exs b/test/select_test.exs new file mode 100644 index 0000000..85af50a --- /dev/null +++ b/test/select_test.exs @@ -0,0 +1,15 @@ +defmodule AshSqlite.SelectTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + require Ash.Query + + test "values not selected in the query are not present in the response" do + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + assert [%{title: %Ash.NotLoaded{}}] = Ash.read!(Ash.Query.select(Post, :id)) + end +end diff --git a/test/sort_test.exs b/test/sort_test.exs new file mode 100644 index 0000000..c17f4b3 --- /dev/null +++ b/test/sort_test.exs @@ -0,0 +1,175 @@ +defmodule AshSqlite.SortTest do + @moduledoc false + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.{Comment, Post, PostLink} + + require Ash.Query + + test "multi-column sorts work" do + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 0}) + |> 
Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 1}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "bbb", score: 0}) + |> Ash.create!() + + assert [ + %{title: "aaa", score: 0}, + %{title: "aaa", score: 1}, + %{title: "bbb"} + ] = + Ash.read!( + Post + |> Ash.Query.sort(title: :asc, score: :asc) + ) + end + + test "multi-column sorts work on inclusion" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 0}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 1}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "bbb", score: 0}) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "aaa", likes: 1}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "bbb", likes: 1}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + Comment + |> Ash.Changeset.for_create(:create, %{title: "aaa", likes: 2}) + |> Ash.Changeset.manage_relationship(:post, post, type: :append_and_remove) + |> Ash.create!() + + posts = + Post + |> Ash.Query.load( + comments: + Comment + |> Ash.Query.sort([:title, :likes]) + |> Ash.Query.select([:title, :likes]) + |> Ash.Query.limit(1) + ) + |> Ash.Query.sort([:title, :score]) + |> Ash.read!() + + assert [ + %{title: "aaa", comments: [%{title: "aaa"}]}, + %{title: "aaa"}, + %{title: "bbb"} + ] = posts + end + + test "multicolumn sort works with a select statement" do + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 0}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 1}) + |> Ash.create!() + + Post + |> Ash.Changeset.for_create(:create, %{title: "bbb", score: 0}) + |> Ash.create!() + + assert [ + %{title: "aaa", score: 0}, + %{title: "aaa", score: 1}, + %{title: "bbb"} + ] = + Ash.read!( + Post + |> Ash.Query.sort(title: :asc, score: :asc) + |> Ash.Query.select([:title, :score]) + ) + end + + test "sorting when joining to a many to many relationship sorts properly" do + post1 = + Post + |> Ash.Changeset.for_create(:create, %{title: "aaa", score: 0}) + |> Ash.create!() + + post2 = + Post + |> Ash.Changeset.for_create(:create, %{title: "bbb", score: 1}) + |> Ash.create!() + + post3 = + Post + |> Ash.Changeset.for_create(:create, %{title: "ccc", score: 0}) + |> Ash.create!() + + PostLink + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:source_post, post1, type: :append) + |> Ash.Changeset.manage_relationship(:destination_post, post3, type: :append) + |> Ash.create!() + + PostLink + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:source_post, post2, type: :append) + |> Ash.Changeset.manage_relationship(:destination_post, post2, type: :append) + |> Ash.create!() + + PostLink + |> Ash.Changeset.new() + |> Ash.Changeset.manage_relationship(:source_post, post3, type: :append) + |> Ash.Changeset.manage_relationship(:destination_post, post1, type: :append) + |> Ash.create!() + + assert [ + %{title: "aaa"}, + %{title: "bbb"}, + %{title: "ccc"} + ] = + Ash.read!( + Post + |> Ash.Query.sort(title: :asc) + |> Ash.Query.filter(linked_posts.title in ["aaa", "bbb", "ccc"]) + ) + + assert [ + %{title: "ccc"}, + %{title: "bbb"}, + %{title: "aaa"} + ] = + Ash.read!( + Post + |> Ash.Query.sort(title: :desc) + |> Ash.Query.filter(linked_posts.title in ["aaa", "bbb", "ccc"] or 
title == "aaa") + ) + + assert [ + %{title: "ccc"}, + %{title: "bbb"}, + %{title: "aaa"} + ] = + Ash.read!( + Post + |> Ash.Query.sort(title: :desc) + |> Ash.Query.filter( + linked_posts.title in ["aaa", "bbb", "ccc"] or + post_links.source_post_id == ^post2.id + ) + ) + end +end diff --git a/test/support/concat.ex b/test/support/concat.ex new file mode 100644 index 0000000..0977a28 --- /dev/null +++ b/test/support/concat.ex @@ -0,0 +1,35 @@ +defmodule AshSqlite.Test.Concat do + @moduledoc false + use Ash.Resource.Calculation + require Ash.Query + + def init(opts) do + if opts[:keys] && is_list(opts[:keys]) && Enum.all?(opts[:keys], &is_atom/1) do + {:ok, opts} + else + {:error, "Expected a `keys` option for which keys to concat"} + end + end + + def expression(opts, %{arguments: %{separator: separator}}) do + Enum.reduce(opts[:keys], nil, fn key, expr -> + if expr do + if separator do + expr(^expr <> ^separator <> ^ref(key)) + else + expr(^expr <> ^ref(key)) + end + else + expr(^ref(key)) + end + end) + end + + def calculate(records, opts, %{separator: separator}) do + Enum.map(records, fn record -> + Enum.map_join(opts[:keys], separator, fn key -> + to_string(Map.get(record, key)) + end) + end) + end +end diff --git a/test/support/domain.ex b/test/support/domain.ex new file mode 100644 index 0000000..90c0680 --- /dev/null +++ b/test/support/domain.ex @@ -0,0 +1,23 @@ +defmodule AshSqlite.Test.Domain do + @moduledoc false + use Ash.Domain + + resources do + resource(AshSqlite.Test.Post) + resource(AshSqlite.Test.Comment) + resource(AshSqlite.Test.IntegerPost) + resource(AshSqlite.Test.Rating) + resource(AshSqlite.Test.PostLink) + resource(AshSqlite.Test.PostView) + resource(AshSqlite.Test.Author) + resource(AshSqlite.Test.Profile) + resource(AshSqlite.Test.User) + resource(AshSqlite.Test.Account) + resource(AshSqlite.Test.Organization) + resource(AshSqlite.Test.Manager) + end + + authorization do + authorize(:when_requested) + end +end diff --git a/test/support/relationships/comments_containing_title.ex b/test/support/relationships/comments_containing_title.ex new file mode 100644 index 0000000..d80d049 --- /dev/null +++ b/test/support/relationships/comments_containing_title.ex @@ -0,0 +1,48 @@ +defmodule AshSqlite.Test.Post.CommentsContainingTitle do + @moduledoc false + + use Ash.Resource.ManualRelationship + use AshSqlite.ManualRelationship + require Ash.Query + require Ecto.Query + + def load(posts, _opts, %{query: query, actor: actor, authorize?: authorize?}) do + post_ids = Enum.map(posts, & &1.id) + + {:ok, + query + |> Ash.Query.filter(post_id in ^post_ids) + |> Ash.Query.filter(contains(title, post.title)) + |> Ash.read!(actor: actor, authorize?: authorize?) + |> Enum.group_by(& &1.post_id)} + end + + def ash_sqlite_join(query, _opts, current_binding, as_binding, :inner, destination_query) do + {:ok, + Ecto.Query.from(_ in query, + join: dest in ^destination_query, + as: ^as_binding, + on: dest.post_id == as(^current_binding).id, + on: fragment("instr(?, ?) > 0", dest.title, as(^current_binding).title) + )} + end + + def ash_sqlite_join(query, _opts, current_binding, as_binding, :left, destination_query) do + {:ok, + Ecto.Query.from(_ in query, + left_join: dest in ^destination_query, + as: ^as_binding, + on: dest.post_id == as(^current_binding).id, + on: fragment("instr(?, ?) 
> 0", dest.title, as(^current_binding).title) + )} + end + + def ash_sqlite_subquery(_opts, current_binding, as_binding, destination_query) do + {:ok, + Ecto.Query.from(_ in destination_query, + where: parent_as(^current_binding).id == as(^as_binding).post_id, + where: + fragment("instr(?, ?) > 0", as(^as_binding).title, parent_as(^current_binding).title) + )} + end +end diff --git a/test/support/repo_case.ex b/test/support/repo_case.ex new file mode 100644 index 0000000..a405788 --- /dev/null +++ b/test/support/repo_case.ex @@ -0,0 +1,28 @@ +defmodule AshSqlite.RepoCase do + @moduledoc false + use ExUnit.CaseTemplate + + alias Ecto.Adapters.SQL.Sandbox + + using do + quote do + alias AshSqlite.TestRepo + + import Ecto + import Ecto.Query + import AshSqlite.RepoCase + + # and any other stuff + end + end + + setup tags do + :ok = Sandbox.checkout(AshSqlite.TestRepo) + + unless tags[:async] do + Sandbox.mode(AshSqlite.TestRepo, {:shared, self()}) + end + + :ok + end +end diff --git a/test/support/resources/account.ex b/test/support/resources/account.ex new file mode 100644 index 0000000..92903ce --- /dev/null +++ b/test/support/resources/account.ex @@ -0,0 +1,32 @@ +defmodule AshSqlite.Test.Account do + @moduledoc false + use Ash.Resource, domain: AshSqlite.Test.Domain, data_layer: AshSqlite.DataLayer + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + attribute(:is_active, :boolean, public?: true) + end + + calculations do + calculate( + :active, + :boolean, + expr(is_active), + public?: true + ) + end + + sqlite do + table "accounts" + repo(AshSqlite.TestRepo) + end + + relationships do + belongs_to(:user, AshSqlite.Test.User, public?: true) + end +end diff --git a/test/support/resources/author.ex b/test/support/resources/author.ex new file mode 100644 index 0000000..df6c88d --- /dev/null +++ b/test/support/resources/author.ex @@ -0,0 +1,74 @@ +defmodule AshSqlite.Test.Author do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + table("authors") + repo(AshSqlite.TestRepo) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:first_name, :string, public?: true) + attribute(:last_name, :string, public?: true) + attribute(:bio, AshSqlite.Test.Bio, public?: true) + #attribute(:badges, {:array, :atom}, public?: true) + end + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + relationships do + has_one(:profile, AshSqlite.Test.Profile, public?: true) + has_many(:posts, AshSqlite.Test.Post, public?: true) + end + + calculations do + calculate(:title, :string, expr(bio[:title])) + calculate(:full_name, :string, expr(first_name <> " " <> last_name)) + # calculate(:full_name_with_nils, :string, expr(string_join([first_name, last_name], " "))) + # calculate(:full_name_with_nils_no_joiner, :string, expr(string_join([first_name, last_name]))) + # calculate(:split_full_name, {:array, :string}, expr(string_split(full_name))) + + calculate(:first_name_or_bob, :string, expr(first_name || "bob")) + calculate(:first_name_and_bob, :string, expr(first_name && "bob")) + + calculate( + :conditional_full_name, + :string, + expr( + if( + is_nil(first_name) or is_nil(last_name), + "(none)", + first_name <> " " <> last_name + ) + ) + ) + + calculate( + :nested_conditional, + :string, + expr( + if( + is_nil(first_name), + "No First Name", + if( + is_nil(last_name), + "No Last Name", + first_name <> " " 
<> last_name + ) + ) + ) + ) + + calculate :param_full_name, + :string, + {AshSqlite.Test.Concat, keys: [:first_name, :last_name]} do + argument(:separator, :string, default: " ", constraints: [allow_empty?: true, trim?: false]) + end + end +end diff --git a/test/support/resources/bio.ex b/test/support/resources/bio.ex new file mode 100644 index 0000000..ce87602 --- /dev/null +++ b/test/support/resources/bio.ex @@ -0,0 +1,21 @@ +defmodule AshSqlite.Test.Bio do + @moduledoc false + use Ash.Resource, data_layer: :embedded + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + attributes do + attribute(:title, :string, public?: true) + attribute(:bio, :string, public?: true) + attribute(:years_of_experience, :integer, public?: true) + + attribute :list_of_strings, {:array, :string} do + public?(true) + allow_nil?(true) + default(nil) + end + end +end diff --git a/test/support/resources/comment.ex b/test/support/resources/comment.ex new file mode 100644 index 0000000..7c6e2fb --- /dev/null +++ b/test/support/resources/comment.ex @@ -0,0 +1,63 @@ +defmodule AshSqlite.Test.Comment do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer, + authorizers: [ + Ash.Policy.Authorizer + ] + + policies do + bypass action_type(:read) do + # Check that the comment is in the same org (via post) as actor + authorize_if(relates_to_actor_via([:post, :organization, :users])) + end + end + + sqlite do + table "comments" + repo(AshSqlite.TestRepo) + + references do + reference(:post, on_delete: :delete, on_update: :update, name: "special_name_fkey") + end + end + + actions do + default_accept(:*) + defaults([:read, :update, :destroy]) + + create :create do + primary?(true) + argument(:rating, :map) + + change(manage_relationship(:rating, :ratings, on_missing: :ignore, on_match: :create)) + end + end + + attributes do + uuid_primary_key(:id) + attribute(:title, :string, public?: true) + attribute(:likes, :integer, public?: true) + attribute(:arbitrary_timestamp, :utc_datetime_usec, public?: true) + create_timestamp(:created_at, writable?: true, public?: true) + end + + relationships do + belongs_to(:post, AshSqlite.Test.Post, public?: true) + belongs_to(:author, AshSqlite.Test.Author, public?: true) + + has_many(:ratings, AshSqlite.Test.Rating, + public?: true, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "comment_ratings"}} + ) + + has_many(:popular_ratings, AshSqlite.Test.Rating, + public?: true, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "comment_ratings"}}, + filter: expr(score > 5) + ) + end +end diff --git a/test/support/resources/integer_post.ex b/test/support/resources/integer_post.ex new file mode 100644 index 0000000..874bfa3 --- /dev/null +++ b/test/support/resources/integer_post.ex @@ -0,0 +1,21 @@ +defmodule AshSqlite.Test.IntegerPost do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + table "integer_posts" + repo AshSqlite.TestRepo + end + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + attributes do + integer_primary_key(:id) + attribute(:title, :string, public?: true) + end +end diff --git a/test/support/resources/manager.ex b/test/support/resources/manager.ex new file mode 100644 index 0000000..725f596 --- /dev/null +++ b/test/support/resources/manager.ex @@ -0,0 +1,42 @@ +defmodule AshSqlite.Test.Manager do + 
@moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + table("managers") + repo(AshSqlite.TestRepo) + end + + actions do + default_accept(:*) + defaults([:read, :update, :destroy]) + + create :create do + primary?(true) + argument(:organization_id, :uuid, allow_nil?: false) + + change(manage_relationship(:organization_id, :organization, type: :append_and_remove)) + end + end + + identities do + identity(:uniq_code, :code) + end + + attributes do + uuid_primary_key(:id) + attribute(:name, :string, public?: true) + attribute(:code, :string, allow_nil?: false, public?: true) + attribute(:must_be_present, :string, allow_nil?: false, public?: true) + attribute(:role, :string, public?: true) + end + + relationships do + belongs_to :organization, AshSqlite.Test.Organization do + public?(true) + attribute_writable?(true) + end + end +end diff --git a/test/support/resources/organization.ex b/test/support/resources/organization.ex new file mode 100644 index 0000000..f2a1524 --- /dev/null +++ b/test/support/resources/organization.ex @@ -0,0 +1,21 @@ +defmodule AshSqlite.Test.Organization do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + table("orgs") + repo(AshSqlite.TestRepo) + end + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:name, :string, public?: true) + end +end diff --git a/test/support/resources/post.ex b/test/support/resources/post.ex new file mode 100644 index 0000000..968e121 --- /dev/null +++ b/test/support/resources/post.ex @@ -0,0 +1,235 @@ +defmodule AshSqlite.Test.Post do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer, + authorizers: [ + Ash.Policy.Authorizer + ] + + policies do + bypass action_type(:read) do + # Check that the post is in the same org as actor + authorize_if(relates_to_actor_via([:organization, :users])) + end + end + + sqlite do + table("posts") + repo(AshSqlite.TestRepo) + base_filter_sql("type = 'sponsored'") + + custom_indexes do + index([:uniq_custom_one, :uniq_custom_two], + unique: true, + message: "dude what the heck" + ) + end + end + + resource do + base_filter(expr(type == type(:sponsored, ^Ash.Type.Atom))) + end + + actions do + default_accept(:*) + defaults([:update, :destroy]) + + read :read do + primary?(true) + end + + read :paginated do + pagination(offset?: true, required?: true) + end + + create :create do + primary?(true) + argument(:rating, :map) + + change( + manage_relationship(:rating, :ratings, + on_missing: :ignore, + on_no_match: :create, + on_match: :create + ) + ) + end + + update :increment_score do + argument(:amount, :integer, default: 1) + change(atomic_update(:score, expr((score || 0) + ^arg(:amount)))) + end + end + + identities do + identity(:uniq_one_and_two, [:uniq_one, :uniq_two]) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:title, :string, public?: true) + attribute(:score, :integer, public?: true) + attribute(:public, :boolean, public?: true) + attribute(:category, :ci_string, public?: true) + attribute(:type, :atom, default: :sponsored, writable?: false) + attribute(:price, :integer, public?: true) + attribute(:decimal, :decimal, default: Decimal.new(0), public?: true) + attribute(:status, AshSqlite.Test.Types.Status, public?: true) + attribute(:status_enum, 
AshSqlite.Test.Types.StatusEnum, public?: true) + + attribute(:status_enum_no_cast, AshSqlite.Test.Types.StatusEnumNoCast, + source: :status_enum, + public?: true + ) + + attribute(:stuff, :map, public?: true) + attribute(:uniq_one, :string, public?: true) + attribute(:uniq_two, :string, public?: true) + attribute(:uniq_custom_one, :string, public?: true) + attribute(:uniq_custom_two, :string, public?: true) + create_timestamp(:created_at) + update_timestamp(:updated_at) + end + + code_interface do + define(:get_by_id, action: :read, get_by: [:id]) + define(:increment_score, args: [{:optional, :amount}]) + end + + relationships do + belongs_to :organization, AshSqlite.Test.Organization do + public?(true) + attribute_writable?(true) + end + + belongs_to(:author, AshSqlite.Test.Author, public?: true) + + has_many(:comments, AshSqlite.Test.Comment, destination_attribute: :post_id, public?: true) + + has_many :comments_matching_post_title, AshSqlite.Test.Comment do + public?(true) + filter(expr(title == parent_expr(title))) + end + + has_many :popular_comments, AshSqlite.Test.Comment do + public?(true) + destination_attribute(:post_id) + filter(expr(likes > 10)) + end + + has_many :comments_containing_title, AshSqlite.Test.Comment do + public?(true) + manual(AshSqlite.Test.Post.CommentsContainingTitle) + end + + has_many(:ratings, AshSqlite.Test.Rating, + public?: true, + destination_attribute: :resource_id, + relationship_context: %{data_layer: %{table: "post_ratings"}} + ) + + has_many(:post_links, AshSqlite.Test.PostLink, + public?: true, + destination_attribute: :source_post_id, + filter: [state: :active] + ) + + many_to_many(:linked_posts, __MODULE__, + public?: true, + through: AshSqlite.Test.PostLink, + join_relationship: :post_links, + source_attribute_on_join_resource: :source_post_id, + destination_attribute_on_join_resource: :destination_post_id + ) + + has_many(:views, AshSqlite.Test.PostView, public?: true) + end + + validations do + validate(attribute_does_not_equal(:title, "not allowed")) + end + + calculations do + calculate(:score_after_winning, :integer, expr((score || 0) + 1)) + calculate(:negative_score, :integer, expr(-score)) + calculate(:category_label, :string, expr("(" <> category <> ")")) + calculate(:score_with_score, :string, expr(score <> score)) + calculate(:foo_bar_from_stuff, :string, expr(stuff[:foo][:bar])) + + calculate( + :score_map, + :map, + expr(%{ + negative_score: %{foo: negative_score, bar: negative_score} + }) + ) + + calculate( + :calc_returning_json, + AshSqlite.Test.Money, + expr( + fragment(""" + '{"amount":100, "currency": "usd"}' + """) + ) + ) + + calculate( + :was_created_in_the_last_month, + :boolean, + expr( + # This is written in a silly way on purpose, to test a regression + if( + fragment("(? <= (DATE(? 
- '+1 month')))", now(), created_at), + true, + false + ) + ) + ) + + calculate( + :price_string, + :string, + CalculatePostPriceString + ) + + calculate( + :price_string_with_currency_sign, + :string, + CalculatePostPriceStringWithSymbol + ) + end +end + +defmodule CalculatePostPriceString do + @moduledoc false + use Ash.Resource.Calculation + + @impl true + def load(_, _, _), do: [:price] + + @impl true + def calculate(records, _, _) do + Enum.map(records, fn %{price: price} -> + dollars = div(price, 100) + cents = rem(price, 100) + "#{dollars}.#{cents}" + end) + end +end + +defmodule CalculatePostPriceStringWithSymbol do + @moduledoc false + use Ash.Resource.Calculation + + @impl true + def load(_, _, _), do: [:price_string] + + @impl true + def calculate(records, _, _) do + Enum.map(records, fn %{price_string: price_string} -> + "#{price_string}$" + end) + end +end diff --git a/test/support/resources/post_link.ex b/test/support/resources/post_link.ex new file mode 100644 index 0000000..a794d73 --- /dev/null +++ b/test/support/resources/post_link.ex @@ -0,0 +1,42 @@ +defmodule AshSqlite.Test.PostLink do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + table "post_links" + repo AshSqlite.TestRepo + end + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + identities do + identity(:unique_link, [:source_post_id, :destination_post_id]) + end + + attributes do + attribute :state, :atom do + public?(true) + constraints(one_of: [:active, :archived]) + default(:active) + end + end + + relationships do + belongs_to :source_post, AshSqlite.Test.Post do + public?(true) + allow_nil?(false) + primary_key?(true) + end + + belongs_to :destination_post, AshSqlite.Test.Post do + public?(true) + allow_nil?(false) + primary_key?(true) + end + end +end diff --git a/test/support/resources/post_views.ex b/test/support/resources/post_views.ex new file mode 100644 index 0000000..c87307a --- /dev/null +++ b/test/support/resources/post_views.ex @@ -0,0 +1,35 @@ +defmodule AshSqlite.Test.PostView do + @moduledoc false + use Ash.Resource, domain: AshSqlite.Test.Domain, data_layer: AshSqlite.DataLayer + + actions do + default_accept(:*) + defaults([:create, :read]) + end + + attributes do + create_timestamp(:time) + attribute(:browser, :atom, constraints: [one_of: [:firefox, :chrome, :edge]], public?: true) + end + + relationships do + belongs_to :post, AshSqlite.Test.Post do + public?(true) + allow_nil?(false) + attribute_writable?(true) + end + end + + resource do + require_primary_key?(false) + end + + sqlite do + table "post_views" + repo AshSqlite.TestRepo + + references do + reference :post, ignore?: true + end + end +end diff --git a/test/support/resources/profile.ex b/test/support/resources/profile.ex new file mode 100644 index 0000000..043a91a --- /dev/null +++ b/test/support/resources/profile.ex @@ -0,0 +1,25 @@ +defmodule AshSqlite.Test.Profile do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + table("profile") + repo(AshSqlite.TestRepo) + end + + attributes do + uuid_primary_key(:id, writable?: true) + attribute(:description, :string, public?: true) + end + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + relationships do + belongs_to(:author, AshSqlite.Test.Author, public?: true) + end +end diff --git a/test/support/resources/rating.ex b/test/support/resources/rating.ex new 
file mode 100644 index 0000000..90f5760 --- /dev/null +++ b/test/support/resources/rating.ex @@ -0,0 +1,22 @@ +defmodule AshSqlite.Test.Rating do + @moduledoc false + use Ash.Resource, + domain: AshSqlite.Test.Domain, + data_layer: AshSqlite.DataLayer + + sqlite do + polymorphic?(true) + repo AshSqlite.TestRepo + end + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + attribute(:score, :integer, public?: true) + attribute(:resource_id, :uuid, public?: true) + end +end diff --git a/test/support/resources/user.ex b/test/support/resources/user.ex new file mode 100644 index 0000000..7baab1c --- /dev/null +++ b/test/support/resources/user.ex @@ -0,0 +1,24 @@ +defmodule AshSqlite.Test.User do + @moduledoc false + use Ash.Resource, domain: AshSqlite.Test.Domain, data_layer: AshSqlite.DataLayer + + actions do + default_accept(:*) + defaults([:create, :read, :update, :destroy]) + end + + attributes do + uuid_primary_key(:id) + attribute(:is_active, :boolean, public?: true) + end + + sqlite do + table "users" + repo(AshSqlite.TestRepo) + end + + relationships do + belongs_to(:organization, AshSqlite.Test.Organization, public?: true) + has_many(:accounts, AshSqlite.Test.Account, public?: true) + end +end diff --git a/test/support/test_app.ex b/test/support/test_app.ex new file mode 100644 index 0000000..e074614 --- /dev/null +++ b/test/support/test_app.ex @@ -0,0 +1,13 @@ +defmodule AshSqlite.TestApp do + @moduledoc false + def start(_type, _args) do + children = [ + AshSqlite.TestRepo + ] + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: AshSqlite.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/test/support/test_custom_extension.ex b/test/support/test_custom_extension.ex new file mode 100644 index 0000000..a854a4e --- /dev/null +++ b/test/support/test_custom_extension.ex @@ -0,0 +1,38 @@ +defmodule AshSqlite.TestCustomExtension do + @moduledoc false + + use AshSqlite.CustomExtension, name: "demo-functions", latest_version: 1 + + @impl true + def install(0) do + """ + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_demo_functions() + RETURNS boolean AS $$ SELECT TRUE $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + """ + end + + @impl true + def install(1) do + """ + execute(\"\"\" + CREATE OR REPLACE FUNCTION ash_demo_functions() + RETURNS boolean AS $$ SELECT FALSE $$ + LANGUAGE SQL + IMMUTABLE; + \"\"\") + """ + end + + @impl true + def uninstall(_version) do + """ + execute(\"\"\" + DROP FUNCTION IF EXISTS ash_demo_functions() + \"\"\") + """ + end +end diff --git a/test/support/test_repo.ex b/test/support/test_repo.ex new file mode 100644 index 0000000..fb51b46 --- /dev/null +++ b/test/support/test_repo.ex @@ -0,0 +1,5 @@ +defmodule AshSqlite.TestRepo do + @moduledoc false + use AshSqlite.Repo, + otp_app: :ash_sqlite +end diff --git a/test/support/types/email.ex b/test/support/types/email.ex new file mode 100644 index 0000000..f9fa483 --- /dev/null +++ b/test/support/types/email.ex @@ -0,0 +1,8 @@ +defmodule Test.Support.Types.Email do + @moduledoc false + use Ash.Type.NewType, + subtype_of: :string, + constraints: [ + casing: :lower + ] +end diff --git a/test/support/types/money.ex b/test/support/types/money.ex new file mode 100644 index 0000000..d576d6b --- /dev/null +++ b/test/support/types/money.ex @@ -0,0 +1,18 @@ +defmodule AshSqlite.Test.Money do + @moduledoc false + use Ash.Resource, + data_layer: 
:embedded + + attributes do + attribute :amount, :integer do + public?(true) + allow_nil?(false) + constraints(min: 0) + end + + attribute :currency, :atom do + public?(true) + constraints(one_of: [:eur, :usd]) + end + end +end diff --git a/test/support/types/status.ex b/test/support/types/status.ex new file mode 100644 index 0000000..38f422f --- /dev/null +++ b/test/support/types/status.ex @@ -0,0 +1,6 @@ +defmodule AshSqlite.Test.Types.Status do + @moduledoc false + use Ash.Type.Enum, values: [:open, :closed] + + def storage_type, do: :string +end diff --git a/test/support/types/status_enum.ex b/test/support/types/status_enum.ex new file mode 100644 index 0000000..e95a7c8 --- /dev/null +++ b/test/support/types/status_enum.ex @@ -0,0 +1,6 @@ +defmodule AshSqlite.Test.Types.StatusEnum do + @moduledoc false + use Ash.Type.Enum, values: [:open, :closed] + + def storage_type, do: :status +end diff --git a/test/support/types/status_enum_no_cast.ex b/test/support/types/status_enum_no_cast.ex new file mode 100644 index 0000000..2cd9974 --- /dev/null +++ b/test/support/types/status_enum_no_cast.ex @@ -0,0 +1,8 @@ +defmodule AshSqlite.Test.Types.StatusEnumNoCast do + @moduledoc false + use Ash.Type.Enum, values: [:open, :closed] + + def storage_type, do: :status + + def cast_in_query?, do: false +end diff --git a/test/test_helper.exs b/test/test_helper.exs new file mode 100644 index 0000000..5329339 --- /dev/null +++ b/test/test_helper.exs @@ -0,0 +1,6 @@ +ExUnit.start() +ExUnit.configure(stacktrace_depth: 100) + +AshSqlite.TestRepo.start_link() + +Ecto.Adapters.SQL.Sandbox.mode(AshSqlite.TestRepo, :manual) diff --git a/test/type_test.exs b/test/type_test.exs new file mode 100644 index 0000000..815eb4a --- /dev/null +++ b/test/type_test.exs @@ -0,0 +1,14 @@ +defmodule AshSqlite.Test.TypeTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + require Ash.Query + + test "uuids can be used as strings in fragments" do + uuid = Ash.UUID.generate() + + Post + |> Ash.Query.filter(fragment("? 
= ?", id, type(^uuid, :uuid))) + |> Ash.read!() + end +end diff --git a/test/unique_identity_test.exs b/test/unique_identity_test.exs new file mode 100644 index 0000000..6ef6d54 --- /dev/null +++ b/test/unique_identity_test.exs @@ -0,0 +1,45 @@ +defmodule AshSqlite.Test.UniqueIdentityTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + require Ash.Query + + test "unique constraint errors are properly caught" do + post = + Post + |> Ash.Changeset.for_create(:create, %{title: "title"}) + |> Ash.create!() + + assert_raise Ash.Error.Invalid, + ~r/Invalid value provided for id: has already been taken/, + fn -> + Post + |> Ash.Changeset.for_create(:create, %{id: post.id}) + |> Ash.create!() + end + end + + test "a unique constraint can be used to upsert when the resource has a base filter" do + post = + Post + |> Ash.Changeset.for_create(:create, %{ + title: "title", + uniq_one: "fred", + uniq_two: "astair", + price: 10 + }) + |> Ash.create!() + + new_post = + Post + |> Ash.Changeset.for_create(:create, %{ + title: "title2", + uniq_one: "fred", + uniq_two: "astair" + }) + |> Ash.create!(upsert?: true, upsert_identity: :uniq_one_and_two) + + assert new_post.id == post.id + assert new_post.price == 10 + end +end diff --git a/test/update_test.exs b/test/update_test.exs new file mode 100644 index 0000000..a9e81b8 --- /dev/null +++ b/test/update_test.exs @@ -0,0 +1,46 @@ +defmodule AshSqlite.Test.UpdateTest do + use AshSqlite.RepoCase, async: false + alias AshSqlite.Test.Post + + require Ash.Query + + test "updating a record when multiple records are in the table will only update the desired record" do + # This test is here because of a previous bug in update that caused + # all records in the table to be updated. + id_1 = Ash.UUID.generate() + id_2 = Ash.UUID.generate() + + new_post_1 = + Post + |> Ash.Changeset.for_create(:create, %{ + id: id_1, + title: "new_post_1" + }) + |> Ash.create!() + + _new_post_2 = + Post + |> Ash.Changeset.for_create(:create, %{ + id: id_2, + title: "new_post_2" + }) + |> Ash.create!() + + {:ok, updated_post_1} = + new_post_1 + |> Ash.Changeset.for_update(:update, %{ + title: "new_post_1_updated" + }) + |> Ash.update() + + # It is deliberate that post 2 is re-fetched from the db after the + # update to post 1. This ensure that post 2 was not updated. 
diff --git a/test/upsert_test.exs b/test/upsert_test.exs
new file mode 100644
index 0000000..cde27e8
--- /dev/null
+++ b/test/upsert_test.exs
@@ -0,0 +1,60 @@
+defmodule AshSqlite.Test.UpsertTest do
+  use AshSqlite.RepoCase, async: false
+  alias AshSqlite.Test.Post
+
+  require Ash.Query
+
+  test "upserting results in the same created_at timestamp, but a new updated_at timestamp" do
+    id = Ash.UUID.generate()
+
+    new_post =
+      Post
+      |> Ash.Changeset.for_create(:create, %{
+        id: id,
+        title: "title2"
+      })
+      |> Ash.create!(upsert?: true)
+
+    assert new_post.id == id
+    assert new_post.created_at == new_post.updated_at
+
+    updated_post =
+      Post
+      |> Ash.Changeset.for_create(:create, %{
+        id: id,
+        title: "title2"
+      })
+      |> Ash.create!(upsert?: true)
+
+    assert updated_post.id == id
+    assert updated_post.created_at == new_post.created_at
+    assert updated_post.created_at != updated_post.updated_at
+  end
+
+  test "upserting a field with a default sets it to the new value" do
+    id = Ash.UUID.generate()
+
+    new_post =
+      Post
+      |> Ash.Changeset.for_create(:create, %{
+        id: id,
+        title: "title2"
+      })
+      |> Ash.create!(upsert?: true)
+
+    assert new_post.id == id
+    assert new_post.created_at == new_post.updated_at
+
+    updated_post =
+      Post
+      |> Ash.Changeset.for_create(:create, %{
+        id: id,
+        title: "title2",
+        decimal: Decimal.new(5)
+      })
+      |> Ash.create!(upsert?: true)
+
+    assert updated_post.id == id
+    assert Decimal.equal?(updated_post.decimal, Decimal.new(5))
+  end
+end
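For orientation, Ash.create!(..., upsert?: true) in the tests above maps onto SQLite's INSERT ... ON CONFLICT DO UPDATE. A rough sketch of those semantics as raw SQL against the test repo; this is illustrative only, the actual statement AshSqlite.DataLayer emits may differ, and the table and column names are assumed from the tests (Post -> "posts"):

# Sketch of upsert semantics only, not the data layer's real SQL.
id = Ash.UUID.generate()
now = DateTime.to_iso8601(DateTime.utc_now())

AshSqlite.TestRepo.query!(
  """
  INSERT INTO posts (id, title, created_at, updated_at)
  VALUES (?, ?, ?, ?)
  ON CONFLICT (id) DO UPDATE SET
    title = excluded.title,
    updated_at = excluded.updated_at
    -- created_at is deliberately absent from the SET list, so the original
    -- insert's value survives, which is what the first test above asserts
  """,
  [id, "title2", now, now]
)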