mirror of
https://github.com/supabase/supabase.git
synced 2026-05-06 08:56:46 -04:00
fix: typos + add more spelling exceptions (#33328)
Update apps/docs/content/troubleshooting/supabase--your-network-ipv4-and-ipv6-compatibility-cHe3BP.mdx Update supa-mdx-lint/Rule003Spelling.toml
This commit is contained in:
@@ -31,4 +31,4 @@ While pointing out the exact cause for egress may not be straightforward, there
|
||||
- Reduce the number of queries/calls by optimising client code or use caches to reduce the number of requests/queries being done: https://github.com/psteinroe/supabase-cache-helpers/
|
||||
- In case of update/insert queries, if you don’t need the entire row to be returned, configure your ORM/queries to not return the entire row
|
||||
- In case of running manual backups through Supavisor, remove unneeded tables and/or reduce the frequency
|
||||
- For Storage, if you start using the [smartCDN](https://supabase.com/docs/guides/storage/cdn/smart-cdn) Storage Egress usage can be managed. You can also use the [Supabase Image Transformations](https://supabase.com/docs/guides/storage/image-transformations) to optimize the images and reduce the egress.
|
||||
- For Storage, if you start using the [Smart CDN](https://supabase.com/docs/guides/storage/cdn/smart-cdn) Storage Egress usage can be managed. You can also use the [Supabase Image Transformations](https://supabase.com/docs/guides/storage/image-transformations) to optimize the images and reduce the egress.
|
||||
|
||||
+1
-1
@@ -25,6 +25,6 @@ We've documented some of the migrations that run into this issue and their corre
|
||||
|
||||
### Auth: `operator does not exist: uuid = text`
|
||||
|
||||
Temporary fix: Run `insert into auth.schema_migrations values ('20221208132122');` via the [sql editor](https://supabase.com/dashboard/project/_/sql/new) to fix the issue.
|
||||
Temporary fix: Run `insert into auth.schema_migrations values ('20221208132122');` via the [SQL editor](https://supabase.com/dashboard/project/_/sql/new) to fix the issue.
|
||||
|
||||
If the migration error you're seeing looks different, please reach out to [supabase.help](https://supabase.help/) for assistance.
|
||||
|
||||
+3
-3
@@ -14,7 +14,7 @@ http_status_code = 401
|
||||
message = "invalid claim: missing sub"
|
||||
---
|
||||
|
||||
The missing sub claim error is returned when supabase.auth.getUser() is called with an invalid JWT in the session or when the user attempts to register/sign in but hasn't completed the sign in when the getUser call is made.
|
||||
The missing sub claim error is returned when `supabase.auth.getUser()` is called with an invalid JWT in the session or when the user attempts to register/sign in but hasn't completed the sign in when the `getUser` call is made.
|
||||
|
||||
A common pitfall is inadvertently using a Supabase API key (such as the anon or service_role keys) instead of the Supabase Auth access token.
|
||||
|
||||
@@ -24,8 +24,8 @@ A common pitfall, is inadvertently using a Supabase API key (such as the anon or
|
||||
|
||||
**How to Avoid This Issue:**
|
||||
|
||||
- Ensure that the token being passed to supabase.auth.getUser() is, indeed, an Auth access token and not one of the API keys.
|
||||
- Ensure that the token being passed to `supabase.auth.getUser()` is, indeed, an Auth access token and not one of the API keys.
|
||||
|
||||
- Are you creating the client on a per-request basis or are you creating a global client to be shared? If you're creating the client on a per-request basis, then you need to pass the session with the user's JWT from the client to the server somehow. This can be done by sending the user's JWT in a header like an `Authorization: Bearer <user_jwt>` . You can then get this header and call supabase.auth.getUser(user_jwt) with the user's JWT.
|
||||
- Are you creating the client on a per-request basis or are you creating a global client to be shared? If you're creating the client on a per-request basis, then you need to pass the session with the user's JWT from the client to the server somehow. This can be done by sending the user's JWT in a header like an `Authorization: Bearer <user_jwt>` . You can then get this header and call `supabase.auth.getUser(user_jwt)` with the user's JWT.
|
||||
|
||||
- Examine how the Supabase client is being initialized, especially in server-side scenarios.
|
||||
|
||||
+3
-3
@@ -23,7 +23,7 @@ The guide focuses on [psql](https://supabase.com/docs/guides/database/psql) but
|
||||
|
||||
Some other options include:
|
||||
|
||||
- [pgadmin](https://supabase.com/docs/guides/database/pgadmin)
|
||||
- [pgAdmin](https://supabase.com/docs/guides/database/pgadmin)
|
||||
- [DBeaver](https://supabase.com/docs/guides/database/dbeaver)
|
||||
|
||||
You can install PSQL in [macOS](https://stackoverflow.com/a/49689589/2188186) and [Windows](https://www.postgresql.org/download/windows/) by following these links and instructions. For Linux (Debian) you can run the following:
|
||||
@@ -57,6 +57,6 @@ Disable timeout
|
||||
set statement_timeout = '0';
|
||||
```
|
||||
|
||||
If your task is particularly long, you can may want to consider boosting your computing power temporarily. Compute size on Supabase is charged by the hour, so you can increase it for an hour or two, complete your task faster, then scale it back afterward.
|
||||
If your task is particularly long, you may want to consider boosting your computing power temporarily. Compute size on Supabase is charged by the hour, so you can increase it for an hour or two, complete your task faster, then scale it back afterwards.
|
||||
|
||||
If you want to temporarily upgrade, you can find the add-ons for your project in your [Dashboard's Add-Ons Settings.](https://supabase.green/dashboard/project/_/settings/addons)
|
||||
If you want to temporarily upgrade, you can find the add-ons for your project in your [Dashboard's Add-ons Settings.](https://supabase.green/dashboard/project/_/settings/addons)
|
||||
|
||||
+1
-1
@@ -13,6 +13,6 @@ You can run this query to check the current settings set for your roles: `SELECT
|
||||
|
||||
To increase the `statement_timeout` for a specific role, you may follow the instructions [here](https://supabase.com/docs/guides/database/timeouts#changing-the-default-timeout). Note that it may require a quick reboot for the changes to take effect.
|
||||
|
||||
Additionally, to check how long a query is taking, you can check the Query Performance report which can give you more information on the query's performance: https://app.supabase.com/project/_/reports/query-performance. You can use the [query plan analyzer](https://www.postgresql.org/docs/current/sql-explain.html) on any expensive queries that you have identified: `explain analyze <query-statement-here>;`. For supabase-js/ postgREST queries you can use `.explain()`.
|
||||
Additionally, to check how long a query is taking, you can check the Query Performance report which can give you more information on the query's performance: https://app.supabase.com/project/_/reports/query-performance. You can use the [query plan analyzer](https://www.postgresql.org/docs/current/sql-explain.html) on any expensive queries that you have identified: `explain analyze <query-statement-here>;`. For supabase-js/ PostgREST queries you can use `.explain()`.
|
||||
|
||||
You can also make use of Postgres logs that will give you useful information like when the query was executed: https://app.supabase.com/project/_/logs/postgres-logs.
|
||||
|
||||
+3
-3
@@ -14,10 +14,10 @@ If you are using Email Authentication to login into your Supabase account:
|
||||
- Invite a new email address to your Supabase Organization(s) as an Organization owner.
|
||||
- After signing up for a new account with your new email address and confirming you have access to the Organization(s) with your new account, you can leave the Organization(s) from your previous account.
|
||||
|
||||
If you are using Github Authentication to login into your Supabase account:
|
||||
If you are using GitHub Authentication to log in to your Supabase account:
|
||||
|
||||
- Log out of Supabase.
|
||||
- Change Primary Email in GitHub
|
||||
- Log out of Github.
|
||||
- Log back into Github (with the new, desired email set as primary)
|
||||
- Log out of GitHub.
|
||||
- Log back into GitHub (with the new, desired email set as primary)
|
||||
- Log back into Supabase.
|
||||
|
||||
@@ -31,7 +31,7 @@ It can be accessed in a project's [Email Templates](https://supabase.com/dashboa
|
||||
|
||||

|
||||
|
||||
If you need to update a user's meta-data, you can do so with the [updateUser](https://supabase.com/docs/reference/javascript/auth-updateuser?example=update-the-users-metadata) function.
|
||||
If you need to update a user's meta-data, you can do so with the [`updateUser`](https://supabase.com/docs/reference/javascript/auth-updateuser?example=update-the-users-metadata) function.
|
||||
|
||||
The meta-data can be used to store a user's language preferences. You could then use "if statements" in the email template to set the response for a specific language:
|
||||
|
||||
|
||||
@@ -66,7 +66,7 @@ DROP FUNCTION <function name>() CASCADE;
|
||||
|
||||
Then recreate the functions with a [security definer](https://supabase.com/docs/guides/database/functions#security-definer-vs-invoker) modifier before recreating the triggers.
|
||||
|
||||
The [SQL Editor](https://supabase.com/dashboard/project/_/sql/) contains a template for [User Management](https://supabase.com/dashboard/project/_/sql/quickstarts). Within it, there is a working example of how to setup triggers with security definers that may be worth referencing:
|
||||
The [SQL Editor](https://supabase.com/dashboard/project/_/sql/) contains a template for [User Management](https://supabase.com/dashboard/project/_/sql/quickstarts). Within it, there is a working example of how to setup triggers with security definer that may be worth referencing:
|
||||
|
||||
```sql
|
||||
create table profiles (
|
||||
|
||||
@@ -29,7 +29,7 @@ Add a prepared false flag to the client:
|
||||
export const client = postgres(connectionString, { prepare: false })
|
||||
```
|
||||
|
||||
# Node postgres
|
||||
# Node Postgres
|
||||
|
||||
[Just omit the "name" value in a query definition](https://node-postgres.com/features/queries#prepared-statements):
|
||||
|
||||
@@ -51,6 +51,6 @@ Follow the recommendation in the [asyncpg docs](https://magicstack.github.io/asy
|
||||
|
||||
> disable automatic use of prepared statements by passing `statement_cache_size=0` to [asyncpg.connect()](https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.connection.connect) and [asyncpg.create_pool()](https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.pool.create_pool) (and, obviously, avoid the use of [Connection.prepare()](https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.connection.Connection.prepare));
|
||||
|
||||
# Rust's Deadpool or tokio-postgres:
|
||||
# Rust's Deadpool or `tokio-postgres`:
|
||||
|
||||
- Check [Github Discussion](https://github.com/bikeshedder/deadpool/issues/340#event-13642472475)
|
||||
- Check [GitHub Discussion](https://github.com/bikeshedder/deadpool/issues/340#event-13642472475)
|
||||
|
||||
+1
-1
@@ -15,4 +15,4 @@ PostgREST supports 2 config parameters:
|
||||
|
||||
You do not need to add your "security definer" Functions to either of these if you are using them in your Policies.
|
||||
|
||||
PostgREST doesn’t need to know about this function on extra search path or exposed schemas, as long as you explicitly use the schema inside RLS (eg: `security.rls_func`).
|
||||
PostgREST doesn’t need to know about this function on extra search path or exposed schemas, as long as you explicitly use the schema inside RLS (e.g.: `security.rls_func`).
|
||||
|
||||
+9
-9
@@ -18,23 +18,23 @@ index row size exceeds btree version 4 maximum 2704 for index "idx_name"
|
||||
|
||||
## Summary
|
||||
|
||||
PG has a limit on a BTree tuple(=row) size. It needs to fit at least 3 btree tuples on a 8Kb page. That could not be changed.
|
||||
PG has a limit on a B-tree tuple(=row) size. It needs to fit at least 3 B-tree tuples on an 8 KB page. That cannot be changed.
|
||||
|
||||
BTree row can be a single attribute or multiple attributes. These cases are better addressed separately.
|
||||
B-tree row can be a single attribute or multiple attributes. These cases are better addressed separately.
|
||||
|
||||
## BTree is built with multiple attributes
|
||||
## B-tree is built with multiple attributes
|
||||
|
||||
BTree with multiple attributes will perform better than several only in case the likely SELECT queries use several attributes that include the first attributes that are in the index. I.e. select by 1-st, 2-nd, 3-d but not by 2-nd, 3-d and 5-th index attributes.
|
||||
B-tree with multiple attributes will perform better than several only in case the likely SELECT queries use several attributes that include the first attributes that are in the index. I.e. select by 1-st, 2-nd, 3-d but not by 2-nd, 3-d and 5-th index attributes.
|
||||
|
||||
The other case when multiple attributes BTree is good is when we have INSERT/UPDATE workload that is comparable to SELECT load (generally SELECTS a way more often). Then we can save speed-up updating a single index instead of several at INSERT/UPDATE at cost of SELECT performance decrease.
|
||||
The other case when multiple attributes B-tree is good is when we have INSERT/UPDATE workload that is comparable to SELECT load (generally SELECTS a way more often). Then we can save speed-up updating a single index instead of several at INSERT/UPDATE at cost of SELECT performance decrease.
|
||||
|
||||
But most likely we have multiple attributes BTree index due to some automation tool, not by intention. Even without the mentioned error it's best to build separate single-attribute indexes for each attribute from it. Then drop multiple attributes BTree index. This is a must and the only solution when we have this error though.
|
||||
But most likely we have multiple attributes B-tree index due to some automation tool, not by intention. Even without the mentioned error it's best to build separate single-attribute indexes for each attribute from it. Then drop multiple attributes B-tree index. This is a must and the only solution when we have this error though.
|
||||
|
||||
## BTree is built on a single attribute that is very long
|
||||
## B-tree is built on a single attribute that is very long
|
||||
|
||||
This can be if the index is built on text, JSON column etc. It's not prohibited to build BTree on these datatypes, but it's also ineffective. Why?
|
||||
This can be if the index is built on text, JSON column etc. It's not prohibited to build B-tree on these datatypes, but it's also ineffective. Why?
|
||||
|
||||
One of the measures of index efficiency is the ratio of index entries to the width of all possible values space for this datatype. If we have say 100000 distinct values of int32 in the index then the ratio is 1/40000. If we have text with length of 2704 bytes (maximum for BTree index) we can hardly imagine the number of distinct values that gives us even a comparable ratio. That said indexing of that long values stores much redundancy in the index.
|
||||
One of the measures of index efficiency is the ratio of index entries to the width of all possible values space for this datatype. If we have say 100000 distinct values of int32 in the index then the ratio is 1/40000. If we have text with length of 2704 bytes (maximum for B-tree index) we can hardly imagine the number of distinct values that gives us even a comparable ratio. That said, indexing such long values stores much redundancy in the index.
|
||||
|
||||
The solution is simple: use some kind of hashing (e.g. md5) to transfer the values to a much narrower space. You build a functional index (=index by expression):
|
||||
|
||||
|
||||
@@ -18,7 +18,7 @@ This error typically occurs as a protective measure to prevent unauthorized acce
|
||||
|
||||
To address this issue, we recommend following these troubleshooting steps:
|
||||
|
||||
- Verify Project ID: Ensure the $PROJECT*REF variable in your commands contains the correct Project ID. You can find your Reference ID under [Project -> Settings -> General](https://supabase.com/dashboard/project/*/settings/general) in your Supabase Dashboard. A Reference ID looks something like “xvljpkujuwroxcuvossw”.
|
||||
- Verify Project ID: Ensure the `$PROJECT_REF` variable in your commands contains the correct Project ID. You can find your Reference ID under [Project -> Settings -> General](https://supabase.com/dashboard/project/_/settings/general) in your Supabase Dashboard. A Reference ID looks something like `xvljpkujuwroxcuvossw`.
|
||||
- Authorization Check: Confirm that you’ve been properly authorized. You can also generate a new Access Token in your dashboard and use it for login. Generate a new token [here](https://supabase.com/dashboard/account/tokens) and use it to [log in](https://supabase.com/docs/reference/cli/supabase-login).
|
||||
- Re-link Project: Try [re-linking](https://supabase.com/docs/reference/cli/supabase-link) your project with the newly generated token.
|
||||
- Owner/Admin Permissions: Make sure you have [Owner/Admin](https://supabase.com/docs/guides/platform/access-control) permissions for the project.
|
||||
|
||||
@@ -35,6 +35,7 @@ The output should look something like this:
|
||||
|
||||
Here it is in an easier to read format
|
||||
|
||||
```
|
||||
- CONTAINER ID: < container id >
|
||||
- IMAGE: supabase-grafana-supabase-grafana
|
||||
- COMMAND: /entrypoint.sh
|
||||
@@ -42,6 +43,7 @@ Here it is in an easier to read format
|
||||
- STATUS: Up < unit of time > ago
|
||||
- PORTS: 3000/tcp, 0.0.0.0:8000 → 8080/tcp
|
||||
- NAMES: supabase-grafana-supabase-grafana-1
|
||||
```
|
||||
|
||||
## Step 4: Enter the container
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ INSERT INTO your_table_name (id, column1, column2, column3) VALUES
|
||||
(uuid_generate_v4(), 'value20', 21, FALSE);
|
||||
```
|
||||
|
||||
2. Run the following script (you may need to `pip install psycopg[binary]` in addition to supabase client.
|
||||
2. Run the following script (you may need to `pip install psycopg[binary]` in addition to the Supabase client).
|
||||
|
||||
```
|
||||
import time
|
||||
@@ -96,4 +96,4 @@ if __name__ == "__main__":
|
||||
|
||||
## Expected behavior
|
||||
|
||||
The overhead from PostgREST shouldn't be higher than a few milliseconds at max. 60-70 ms is way too high. This is particular deceiving because one can run the query on the SQL Editor page and it reports the same time as the direct postgres query, which is not what actually happens.
|
||||
The overhead from PostgREST shouldn't be higher than a few milliseconds at max. 60-70 ms is way too high. This is particularly deceiving because one can run the query on the SQL Editor page and it reports the same time as the direct Postgres query, which is not what actually happens.
|
||||
|
||||
+5
-5
@@ -1,13 +1,13 @@
|
||||
---
|
||||
title = "How do you troubleshoot NextJS - Supabase Auth issues?"
|
||||
title = "How do you troubleshoot Next.js - Supabase Auth issues?"
|
||||
github_url = "https://github.com/orgs/supabase/discussions/27606"
|
||||
date_created = "2024-06-28T03:26:44+00:00"
|
||||
topics = [ "auth" ]
|
||||
keywords = [ "NextJS", "SSR", "authentication", "client" ]
|
||||
keywords = [ "Next.js", "SSR", "authentication", "client" ]
|
||||
database_id = "4017d331-0b24-439a-9231-793801395ca2"
|
||||
---
|
||||
|
||||
Authentication is hard. SSR is harder. At Supabase, we try to address these together by providing`@supabase/ssr` package to help implement authentication with Supabase conveniently. While I’m targeting to address NextJS-related issues, you can probably apply these concepts to other SSR frameworks such as Nuxt, SvelteKit, and Remix. This is a living document, and we plan to update it regularly as NextJS and its APIs + `@supabase/ssr` evolve.
|
||||
Authentication is hard. SSR is harder. At Supabase, we try to address these together by providing the `@supabase/ssr` package to help implement authentication with Supabase conveniently. While I’m targeting to address Next.js-related issues, you can probably apply these concepts to other SSR frameworks such as Nuxt, SvelteKit, and Remix. This is a living document, and we plan to update it regularly as Next.js and its APIs + `@supabase/ssr` evolve.
|
||||
|
||||
If you are experiencing issues with Supabase Auth and SSR, the following checklist will help you troubleshoot the issues.
|
||||
|
||||
@@ -21,13 +21,13 @@ This documentation will help you validate that you’re on the correct path.
|
||||
- SSR advanced guide - https://supabase.com/docs/guides/auth/server-side/advanced-guide
|
||||
- Creating a Supabase client for SSR - https://supabase.com/docs/guides/auth/server-side/creating-a-client?queryGroups=framework&framework=nextjs&queryGroups=environment&environment=middleware
|
||||
|
||||
Another way to identify any potential issues with your code is to compare it with the Supabase NextJS quickstart. Use the command `npx create-next-app -e with-supabase` to download a copy to your local machine of the quickstart application.
|
||||
Another way to identify any potential issues with your code is to compare it with the Supabase Next.js quickstart. Use the command `npx create-next-app -e with-supabase` to download a copy to your local machine of the quickstart application.
|
||||
|
||||
Our YouTube channel has great videos to help you implement Supabase Auth with Next.js.
|
||||
|
||||
- The Right Way to do Auth with the Next.js App Router - https://youtu.be/v6UvgfSIjQ0?si=TBUN9dD4pmjRg78a
|
||||
|
||||
Also, please take some time to get familiar with some concepts on authentication with NextJS, such as https://nextjs.org/docs/app/building-your-application/authentication.
|
||||
Also, please take some time to get familiar with some concepts on authentication with Next.js, such as https://nextjs.org/docs/app/building-your-application/authentication.
|
||||
|
||||
We know your requirements vary, and you might run into an edge case. In that scenario, please use our amazing community channels ([GitHub](https://github.com/orgs/supabase/discussions), [Discord](https://discord.gg/rxTfewPvys)) to get help troubleshooting the issue. You can post your issues to the `@supabase/ssr` [GitHub repo](https://github.com/supabase/ssr/issues). We always welcome your contributions!
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ database_id = "085d7649-0b5d-4c3c-b6a1-f2da4b9f33dd"
|
||||
message = "a role cannot be removed while it is still referenced in any database of the cluster"
|
||||
---
|
||||
|
||||
[Quote from postgres docs:](https://www.postgresql.org/docs/current/sql-droprole.html#:~:text=A%20role%20cannot%20be%20removed,been%20granted%20on%20other%20objects.)
|
||||
[Quote from Postgres docs:](https://www.postgresql.org/docs/current/sql-droprole.html#:~:text=A%20role%20cannot%20be%20removed,been%20granted%20on%20other%20objects.)
|
||||
|
||||
> A role cannot be removed if it is still referenced in any database of the cluster; an error will be raised if so. Before dropping the role, you must drop all the objects it owns (or reassign their ownership) and revoke any privileges the role has been granted on other objects.
|
||||
|
||||
|
||||
+20
-20
@@ -37,7 +37,7 @@ Logs provide insights into Postgres operations. They help meet compliance requir
|
||||
|
||||
The most practical way to explore and filter logs is through the [Logs Explorer](https://supabase.com/dashboard/project/_/logs/explorer).
|
||||
|
||||
It uses a subset of Bigquery SQL syntax and preparses queries for optimization. This imposes three primary limitations:
|
||||
It uses a subset of BigQuery SQL syntax and pre-parses queries for optimization. This imposes three primary limitations:
|
||||
|
||||
- No subqueries or `WITH` statements
|
||||
- No `*` wildcards for column names
|
||||
@@ -100,7 +100,7 @@ cross join unnest(parsed) AS parsed;
|
||||
|
||||
**Suggested use cases:**
|
||||
|
||||
- Filter by error severity or sql code
|
||||
- Filter by error severity or SQL code
|
||||
- Get hints, details, and context about error events
|
||||
|
||||
#### Connection/Identification information
|
||||
@@ -110,9 +110,9 @@ cross join unnest(parsed) AS parsed;
|
||||
| parsed.session_id | The session ID | 12345 |
|
||||
| parsed.session_start_time | The start time of the session | 2024-05-08 15:30:00 |
|
||||
| parsed.connection_from | The connection IP | 192.165.1.100 |
|
||||
| parsed.user_name | The name of the connecting database user | postgres |
|
||||
| parsed.application_name | The name of the application | Supavisor, postgREST |
|
||||
| parsed.database_name | The name of the database | postgres |
|
||||
| parsed.user_name | The name of the connecting database user | `postgres` |
|
||||
| parsed.application_name | The name of the application | Supavisor, PostgREST |
|
||||
| parsed.database_name | The name of the database | `postgres` |
|
||||
| parsed.process_id | The process ID, often used to identify extension workers | 1234 |
|
||||
| parsed.backend_type | Determine if the event originated internally (e.g., from background workers like pg_net, timescale, or pg_cron) or externally from a client (`client backend`) | `client backend` |
|
||||
|
||||
@@ -182,7 +182,7 @@ where
|
||||
|
||||
Queries can use complex syntax, so it is often helpful to isolate by referenced database objects, such as `functions`, `tables`, and `columns`. Because query structures can be complex, it is advised to use [regex](https://github.com/orgs/supabase/discussions/22640) to find matches. Some common regex patterns are:
|
||||
|
||||
- `(?i)`: ignore case sensitivty
|
||||
- `(?i)`: ignore case sensitivity
|
||||
- `.`: wildcard
|
||||
- `^`: look for values at start of string
|
||||
- `|`: or operator
|
||||
@@ -195,16 +195,16 @@ All failed queries, including those from PostgREST, Auth, and external libraries
|
||||
|
||||
API servers have assigned database roles for connecting to the database:
|
||||
|
||||
| Role | API/Tool |
|
||||
| -------------------------- | ------------------------------------------------------------------------- |
|
||||
| supabase_admin | Used by Supabase to configure projects and for monitoring |
|
||||
| authenticator | PostgREST |
|
||||
| supabase_auth_admin | Auth |
|
||||
| supabase_storage_admin | Storage |
|
||||
| supabase_realtime_admin | Realtime |
|
||||
| supabase_replication_admin | Synchronizes Read Replicas |
|
||||
| postgres | Supabase Dashboard and External Tools (e.g., Prisma, SQLAlchemy, PSQL...) |
|
||||
| Custom roles | External Tools (e.g., Prisma, SQLAlchemy, PSQL...) |
|
||||
| Role | API/Tool |
|
||||
| ---------------------------- | ------------------------------------------------------------------------- |
|
||||
| `supabase_admin` | Used by Supabase to configure projects and for monitoring |
|
||||
| `authenticator` | PostgREST |
|
||||
| `supabase_auth_admin` | Auth |
|
||||
| `supabase_storage_admin` | Storage |
|
||||
| `supabase_realtime_admin` | Realtime |
|
||||
| `supabase_replication_admin` | Synchronizes Read Replicas |
|
||||
| `postgres` | Supabase Dashboard and External Tools (e.g., Prisma, SQLAlchemy, PSQL...) |
|
||||
| Custom roles | External Tools (e.g., Prisma, SQLAlchemy, PSQL...) |
|
||||
|
||||
Filter by the `parsed.user_name` role to only retrieve logs made by specific roles:
|
||||
|
||||
@@ -219,7 +219,7 @@ where
|
||||
|
||||
## By Dashboard queries
|
||||
|
||||
Queries from the Supabase Dashboard are executed under the postgres role and include the comment `-- source: dashboard`. To isolate or exclude Dashboard requests during debugging, you can filter by this comment.
|
||||
Queries from the Supabase Dashboard are executed under the `postgres` role and include the comment `-- source: dashboard`. To isolate or exclude Dashboard requests during debugging, you can filter by this comment.
|
||||
|
||||
```sql
|
||||
-- find queries executed by the Dashboard
|
||||
@@ -265,7 +265,7 @@ When recording what is accessed and by whom, logging based on database roles and
|
||||
|
||||
You can use the [pg_audit](https://supabase.com/docs/guides/database/extensions/pgaudit) extension to selectively log relevant queries (not just errors) by certain roles, against specific database objects.
|
||||
|
||||
You should take care when using the extension to not log all database events, but only what is absolutely necessary. Overlogging can strain the database and create log noise that makes it difficult to filter for relevant events.
|
||||
You should take care when using the extension to not log all database events, but only what is absolutely necessary. Over-logging can strain the database and create log noise that makes it difficult to filter for relevant events.
|
||||
|
||||
**Filtering by pg_audit**:
|
||||
|
||||
@@ -339,7 +339,7 @@ where
|
||||
|
||||
## Changing log settings
|
||||
|
||||
> WARNING: lenient settings can lead to overlogging, impacting database performance while creating noise in the logs.
|
||||
> WARNING: lenient settings can lead to over-logging, impacting database performance while creating noise in the logs.
|
||||
|
||||
#### Severity levels
|
||||
|
||||
@@ -376,7 +376,7 @@ By default, only failed queries are logged. The [PGAudit extension](https://supa
|
||||
|
||||
#### Logging within database functions
|
||||
|
||||
To track or debug functions, logging can be configured by followin the [function debugging guide](https://supabase.com/docs/guides/database/functions#general-logging)
|
||||
To track or debug functions, logging can be configured by following the [function debugging guide](https://supabase.com/docs/guides/database/functions#general-logging)
|
||||
|
||||
# Frequently Asked Questions
|
||||
|
||||
|
||||
+3
-3
@@ -7,13 +7,13 @@ keywords = [ "index", "hnsw", "vector", "performance", "speed" ]
|
||||
database_id = "7d755701-747f-4c3a-b8be-236c5518e4eb"
|
||||
---
|
||||
|
||||
> Although this guide is specifically for HSNW indexes, it can be generalized to work for any index type
|
||||
> Although this guide is specifically for HNSW indexes, it can be generalized to work for any index type
|
||||
|
||||
> Building an index without the `CONCURRENTLY` modifier will lock the table, but it will also increase build times. For general advice about indexes, check out this [guide](https://github.com/orgs/supabase/discussions/22449).
|
||||
|
||||
### **To speed up queries, it is ideal to create an HNSW index on your embedded column**
|
||||
|
||||
The general structure for creating an hsnw index follows this pattern:
|
||||
The general structure for creating an HNSW index follows this pattern:
|
||||
|
||||
```sql
|
||||
CREATE INDEX <custom name of index> ON <table name> USING hnsw (<vectorized column> <search type>);
|
||||
@@ -100,7 +100,7 @@ show statement_timeout;
|
||||
|
||||
**6. Consider temporarily upgrading your compute size (optional)**
|
||||
|
||||
If your task is particularly long, you can speed it up by boosting your computing power temporarily. Compute size is charged by the hour, so you can increase it for an hour or two to finish your task faster, then scale it back afterward. Here is a list of [compute add-ons](https://supabase.com/docs/guides/platform/compute-add-ons). If you want to temporarily upgrade, you can find the add-ons for your project in your [Dashboard's Add-Ons Settings.](https://supabase.green/dashboard/project/_/settings/addons)
|
||||
If your task is particularly long, you can speed it up by boosting your computing power temporarily. Compute size is charged by the hour, so you can increase it for an hour or two to finish your task faster, then scale it back afterwards. Here is a list of [compute add-ons](https://supabase.com/docs/guides/platform/compute-add-ons). If you want to temporarily upgrade, you can find the add-ons for your project in your [Dashboard's Add-ons Settings.](https://supabase.green/dashboard/project/_/settings/addons)
|
||||
|
||||
**7. Consider increasing disk size (optional)**
|
||||
|
||||
|
||||
+1
-1
@@ -22,7 +22,7 @@ Sometimes it can be informative to log values from your Edge Functions. This wal
|
||||
npx supabase init
|
||||
```
|
||||
|
||||
3. create a .env file in the supabase folder
|
||||
3. create a .env file in the `supabase` folder
|
||||
|
||||
```bash
|
||||
echo "MY_NAME=Some_name" >> ./supabase/.env
|
||||
|
||||
@@ -14,14 +14,14 @@ database_id = "43b62743-252c-4069-a719-0c0737839fd9"
|
||||
- System prioritizes keeping actively used data in RAM
|
||||
- Data is temporarily cached in memory for quick access
|
||||
- Swap is used for less frequently accessed or non-critical data
|
||||
- Default swapiness value of 60 indicates the system's preference to fill up swap for performance benefits
|
||||
- Default swappiness value of 60 indicates the system's preference to fill up swap for performance benefits
|
||||
|
||||
## Compute instance configuration
|
||||
|
||||
- Default swapiness: 60 (out of 100)
|
||||
- Default swappiness: 60 (out of 100)
|
||||
- Determines how aggressively the system moves data from RAM to swap
|
||||
- Default value in Linux distributions
|
||||
- Changing the swapiness value significantly impacts system behavior, as it is a non-linear value. A small adjustment of 10 can result in a very different system behavior
|
||||
- Changing the swappiness value significantly impacts system behavior, as it is a non-linear value. A small adjustment of 10 can result in a very different system behavior
|
||||
- Swap provision: 1024MB on every compute instance
|
||||
|
||||
## Issues with high swap usage
|
||||
@@ -45,7 +45,7 @@ database_id = "43b62743-252c-4069-a719-0c0737839fd9"
|
||||
|
||||
- Monitor memory usage:
|
||||
- If usage is over 85% and swap usage is over 90% for an extended period, optimize database access or add more resources
|
||||
- Monitor CPU iowait usage and disk IO metrics:
|
||||
- Monitor CPU IOWait usage and disk IO metrics:
|
||||
- Exported metrics here: **https://github.com/supabase/grafana-agent-fly-example/blob/main/metrics.md**
|
||||
- Key metrics to track excessive disk IO usage include:
|
||||
- `node_disk_reads_completed_total`: Tracks the total number of completed disk reads.
|
||||
|
||||
+2
-2
@@ -7,10 +7,10 @@ keywords = [ "cache", "rls", "next.js" ]
|
||||
database_id = "9be3727b-c3ea-4dda-b30c-55f3c8aa8f35"
|
||||
---
|
||||
|
||||
Next.js caches urls in certain cases. This is causing users to lose lots of time debugging early on with RLS changes. Changing the table data in the UI will also not be returned in these cases.
|
||||
Next.js caches URLs in certain cases. This is causing users to lose lots of time debugging early on with RLS changes. Changing the table data in the UI will also not be returned in these cases.
|
||||
|
||||
You can look at the Dashboard API Edge log to see if the request is making it to Supabase.
|
||||
Another way to check if the url caching is impacting you is to change `.select('*')` to `.select('colname')` or change column names. This would bust the next.js cache.
|
||||
Another way to check if the URL caching is impacting you is to change `.select('*')` to `.select('colname')` or change column names. This would bust the next.js cache.
|
||||
|
||||
Users have suggested the following three options to turn off the caching, but please refer to next.js docs as needed.
|
||||
|
||||
|
||||
+1
-1
@@ -9,7 +9,7 @@ database_id = "3e9246cb-d592-4051-93c8-53e4e555711c"
|
||||
|
||||
The reason behind this limitation is that the auth helpers library lacks a direct mechanism for performing server-side redirects, as each framework handles redirects differently. However, the library does offer a URL through the data property it returns, which should be utilized for the purpose of redirection.
|
||||
|
||||
**NextJS:**
|
||||
**Next.js:**
|
||||
|
||||
```ts
|
||||
import { NextResponse } from "next/server";
|
||||
|
||||
+1
-1
@@ -25,7 +25,7 @@ LIKE option will copy column definitions, constraints etc. from source table aut
|
||||
|
||||
- Indices definition will be copied with INCLUDING ALL option. They will be propagated during insertion of data into a new parent table.
|
||||
|
||||
- Of course you can hold old table for your data to be safe indefinitely. Do not delete it outright! Your queries after renaming will not use it. Indexes etc. work behind the scenes, you are not required to know its names. (In reality indices for a "new" partitioned table will be new, but you won't see it if you have used LIKE..INCLUDING ALL option).
|
||||
- Of course you can hold old table for your data to be safe indefinitely. Do not delete it outright! Your queries after renaming will not use it. Indexes etc. work behind the scenes, you are not required to know its names. (In reality indices for a "new" partitioned table will be new, but you won't see it if you have used `LIKE..INCLUDING ALL` option).
|
||||
|
||||
- Triggers will be needed to re-create for "parent" partitioned table using `CREATE TRIGGER` clause https://www.postgresql.org/docs/16/sql-createtrigger.html
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ cli = [ "supabase-login", "supabase-link", "supabase-init", "supabase-storage-cp
|
||||
|
||||
Pro projects at the moment cannot be paused. However, [you are allowed to have two free organizations](https://supabase.com/docs/guides/platform/org-based-billing#mix-of-free-and-pro-plan-projects) that can support one active project each and an unlimited number of paused ones.
|
||||
|
||||
If a project is under 500MB, you can [transfer it to be under a free organization](https://supabase.com/docs/guides/platform/project-transfer). Afterward, you can initiate a pause.
|
||||
If a project is under 500MB, you can [transfer it to be under a free organization](https://supabase.com/docs/guides/platform/project-transfer). Afterwards, you can initiate a pause.
|
||||
|
||||
Alternatively, you can download a [daily backup](https://supabase.com/dashboard/project/_/database/backups/scheduled) of just your database for archiving. You can also manually download a .SQL file of your database and storage buckets by following this [guide](https://supabase.com/docs/guides/platform/migrating-and-upgrading-projects#migrate-your-project).
|
||||
|
||||
|
||||
@@ -152,7 +152,7 @@ order by timestamp desc
|
||||
limit 100;
|
||||
```
|
||||
|
||||
If you're interested in modifying the query, there is an advanced [guide](https://github.com/orgs/supabase/discussions/26224) for navigating the postgres logs and a general purpose [one](https://supabase.com/docs/guides/platform/advanced-log-filtering) for applying filters.
|
||||
If you're interested in modifying the query, there is an advanced [guide](https://github.com/orgs/supabase/discussions/26224) for navigating the Postgres logs and a general purpose [one](https://supabase.com/docs/guides/platform/advanced-log-filtering) for applying filters.
|
||||
|
||||
<br />
|
||||
|
||||
@@ -190,7 +190,7 @@ You can then search for your custom messages in the [Logs Interface](https://sup
|
||||
|
||||
### Upgrading pg_cron version
|
||||
|
||||
The current version of pg*cron on Supabase is 1.6.4. It comes with a [few bug fixes](https://github.com/citusdata/pg_cron/releases/tag/v1.6.4). You should consider upgrading to postgres v15.6.1.122+ in the[ Infrastructure Settings](https://supabase.com/dashboard/project/*/settings/infrastructure) to get the latest extension.
|
||||
The current version of pg_cron on Supabase is 1.6.4. It comes with a [few bug fixes](https://github.com/citusdata/pg_cron/releases/tag/v1.6.4). You should consider upgrading to Postgres v15.6.1.122+ in the [Infrastructure Settings](https://supabase.com/dashboard/project/_/settings/infrastructure) to get the latest extension.
|
||||
|
||||
<br />
|
||||
|
||||
|
||||
@@ -91,7 +91,7 @@ Some users have discussed how they managed this problem in a [GitHub Discussion.
|
||||
create user "prisma" with password 'secret_password' bypassrls createdb;
|
||||
```
|
||||
|
||||
> Prisma requires the [createdb modifier](https://supabase.com/blog/postgres-roles-and-privileges#role-attributes) to create shadow databases. It uses them to help manage migrations.
|
||||
> Prisma requires the [`createdb` modifier](https://supabase.com/blog/postgres-roles-and-privileges#role-attributes) to create shadow databases. It uses them to help manage migrations.
|
||||
|
||||
### Give Postgres ownership of the new user:
|
||||
|
||||
|
||||
+2
-2
@@ -102,7 +102,7 @@ Check ownership with this [GitHub Gist](https://gist.github.com/TheOtherBrian1/6
|
||||
ALTER <object type (table, function, etc.)> <auth.object_name> OWNER TO supabase_auth_admin;
|
||||
```
|
||||
|
||||
Alternatively, you can run the SQL script in this [Github Gist](https://gist.github.com/TheOtherBrian1/4714a333432b80660ff71b136b298fb8) to change all
|
||||
Alternatively, you can run the SQL script in this [GitHub Gist](https://gist.github.com/TheOtherBrian1/4714a333432b80660ff71b136b298fb8) to change all
|
||||
|
||||
### Trigger related:
|
||||
|
||||
@@ -169,7 +169,7 @@ order by timestamp
|
||||
|
||||
### Database migration errors
|
||||
|
||||
> "running db migrations: Migrator: problem creating schema migrations"
|
||||
> `running db migrations: Migrator: problem creating schema migrations`
|
||||
|
||||
This is a continuation of the "Corrupted Schema" error from the Postgres Section. If you modified structures in the auth schema, such as columns or tables, or added restrictions, such as RLS, Auth will not be able to complete its migrations. It's necessary to remove those modifications.
|
||||
|
||||
|
||||
+1
-1
@@ -33,7 +33,7 @@ Upon project creation, a static IPv6 address is assigned. However, it's essentia
|
||||
|
||||
### IPv4 address
|
||||
|
||||
Opting for the static [IPv4 addon](https://supabase.com/docs/guides/platform/ipv4-address) provides a more stable connection address. The IPv4 address remains constant unless:
|
||||
Opting for the static [IPv4 add-on](https://supabase.com/docs/guides/platform/ipv4-address) provides a more stable connection address. The IPv4 address remains constant unless:
|
||||
|
||||
- The project is paused or resumed.
|
||||
- Unlike the IPv6 address, upgrading your database does not affect the IPv4 address.
|
||||
|
||||
@@ -34,8 +34,8 @@ Improvement seen over 100x on large tables.
|
||||
|
||||
#### 2. Another method to improve performance is to wrap your RLS queries and functions in select statements.
|
||||
|
||||
This method works well for jwt functions like auth.uid() and auth.jwt() as well as any other functions including security definer type.
|
||||
Wrapping the function in some SQL causes an initPlan to be run by the optimizer which allows it to "cache" the results versus calling the function
|
||||
This method works well for JWT functions like `auth.uid()` and `auth.jwt()` as well as any other functions including security definer type.
|
||||
Wrapping the function in some SQL causes an `initPlan` to be run by the optimizer which allows it to "cache" the results versus calling the function
|
||||
on each row.
|
||||
WARNING: You can only do this if the results of the query or function do not change based on the row data.
|
||||
For RLS like this:
|
||||
@@ -108,7 +108,7 @@ Note that if the `in` list gets to be over 10K items, then extra analysis is lik
|
||||
|
||||
#### 6. Use role in TO option or roles dropdown in the dashboard.
|
||||
|
||||
Never just use RLS involving auth.uid() or auth.jwt() as your way to rule out 'anon' role.
|
||||
Never just use RLS involving `auth.uid()` or `auth.jwt()` as your way to rule out 'anon' role.
|
||||
Always add 'authenticated' to the approved roles instead of nothing or public.
|
||||
Although this does not improve the query performance for the signed in user it does
|
||||
eliminate 'anon' users without taxing the database to process the rest of the RLS.
|
||||
@@ -134,10 +134,10 @@ Show RLS and before after for above examples.
|
||||
|
||||
### Tools to measure performance
|
||||
|
||||
Postgres has tools to analyze the peformance of queries. https://www.postgresql.org/docs/current/sql-explain.html
|
||||
Postgres has tools to analyze the performance of queries. https://www.postgresql.org/docs/current/sql-explain.html
|
||||
The use of explain in detail for query analysis is beyond the scope of this discussion.
|
||||
Here we will use it mainly to get a performance metric to compare times.
|
||||
In order to do RLS testing you need to setup the user jwt claims and change the running user to `anon` or `authenticated`.
|
||||
In order to do RLS testing you need to set up the user JWT claims and change the running user to `anon` or `authenticated`.
|
||||
|
||||
```sql
|
||||
set session role authenticated;
|
||||
@@ -161,7 +161,7 @@ In this case the execution time is the critical number we need to compare.
|
||||
|
||||
PostgREST allows use of explain to get performance information on your queries with Supabase clients.
|
||||
|
||||
Before using this feature you need to run the following command in the Dashboad SQL editor (should not be used in production):
|
||||
Before using this feature you need to run the following command in the Dashboard SQL editor (should not be used in production):
|
||||
|
||||
```sql
|
||||
alter role authenticator set pgrst.db_plan_enabled to true;
|
||||
@@ -192,10 +192,11 @@ Planning Time: 0.092 ms
|
||||
Execution Time: 0.046 ms
|
||||
```
|
||||
|
||||
\*These two github discussions cover the history that lead to this analysis...
|
||||
\*These two GitHub discussions cover the history that led to this analysis...
|
||||
|
||||
[Stable functions do not seem to be honored in RLS in basic form](https://github.com/orgs/supabase/discussions/9311)
|
||||
[current_setting can lead to bad performance when used on RLS](https://github.com/PostgREST/postgrest-docs/issues/609#)
|
||||
{/* supa-mdx-lint-disable-next-line Rule003Spelling */}
|
||||
Thanks Steve Chavez and Wolfgang Walther in those threads.
|
||||
|
||||
### Added example of security definer function having select of a team table, comparing against a column in main table
|
||||
@@ -225,6 +226,8 @@ $$ language plpgsql security definer;
|
||||
|
||||
Some results:
|
||||
|
||||
{/* supa-mdx-lint-disable Rule003Spelling */}
|
||||
|
||||
| Policy | Index | Main Rs | Team Rs | on 10 teams | 100 | 500 | note |
|
||||
| -------------------------------- | ----- | ------- | ------- | ----------- | ----- | ----- | ------------------ |
|
||||
| =ANY(user_teams()) | no | 1M | 1000 | >2Min | >2Min | >2Min | TO or killed |
|
||||
@@ -233,3 +236,5 @@ Some results:
|
||||
| =ANY(ARRAY(select user_teams())) | yes | 1M | 1000 | 2ms | 3 | 3 | |
|
||||
| in(1,2,3...100) | no | 1M | NA | 130ms | 142 | x | baseline check |
|
||||
| =ANY(ARRAY(select user_teams())) | yes | 1M | 10K | x | x | x | 24ms (on 1K teams) |
|
||||
|
||||
{/* supa-mdx-lint-enable Rule003Spelling */}
|
||||
|
||||
@@ -21,7 +21,7 @@ Whatever the reason, here's how to rotate the keys for your Supabase project.
|
||||
/>
|
||||
|
||||
3. Click the `Generate new secret` button and choose either a random secret, or custom if you'd like to supply one of your own.
|
||||
4. NOTE: Once regeneraged, all current API secrets will be immediately invalidated, and all connections using them will be severed. You will need to deploy the new secrets for connections to begin working again.
|
||||
4. NOTE: Once regenerated, all current API secrets will be immediately invalidated, and all connections using them will be severed. You will need to deploy the new secrets for connections to begin working again.
|
||||
5. Confirm the changes in the warning that pops up by clicking `Generate New Secret` again.
|
||||
|
||||
<img
|
||||
|
||||
@@ -22,6 +22,6 @@ Enabling anonymous sign-ins on your project does not reduce its security. Here's
|
||||
- Demo Mode: You can enable users to try out your product in a demo mode without full account creation.
|
||||
- Feature Restrictions: You can limit certain actions (like posting public content) to users who sign up with more identifiable information (e.g., Google or Apple sign-ins), while still allowing anonymous users to explore your app.
|
||||
|
||||
Remember, the underlying security of your project will remain the same. If your project was secure before enabling anonymous sign-ins, it will continue to be secure afterward. It's important to review and adjust your RLS policies regularly to align with your security and business needs.
|
||||
Remember, the underlying security of your project will remain the same. If your project was secure before enabling anonymous sign-ins, it will continue to be secure afterwards. It's important to review and adjust your RLS policies regularly to align with your security and business needs.
|
||||
|
||||
If you have further questions or need assistance in setting up, please don't hesitate to contact the [support team](https://supabase.com/dashboard/support/new).
|
||||
|
||||
@@ -1,47 +0,0 @@
|
||||
---
|
||||
title = "Security of anonymous sign-ins"
|
||||
github_url = "https://github.com/orgs/supabase/discussions/22855"
|
||||
date_created = "2024-04-18T07:44:47+00:00"
|
||||
topics = [ "auth", "database", "platform", "storage" ]
|
||||
keywords = [ "anonymous", "security", "sign-in", "auth", "jwt" ]
|
||||
database_id = "799fb49d-4c0e-4f1a-adf2-20a55a97c3c2"
|
||||
---
|
||||
|
||||
A common concern is that enabling anonymous sign-ins on your project decreases its security. This is not true, and hopefully this thread will help you grasp why.
|
||||
|
||||
Anonymous users are just users that don't have an email address or phone number associated yet. Otherwise, they are just like any other user in the system. They have their own row in `auth.users`, their own unique user ID, etc.
|
||||
|
||||
All RLS policies in place for regular users also apply to anonymous users. Therefore, the security of the project does not decrease. To further illustrate this, let's briefly forget that anonymous users exist.
|
||||
|
||||
Let's imagine a human trying to trick the project's Auth system, so they use a temp-mail service (like DuckDuckGo's duck.com or any of the multitude of temp-mail services out there) to sign up. That user account has an email address, but it's not a particularly _identifying_ one. You can't link it to a human, and in fact the email address may not exist 5 minutes later.
|
||||
|
||||
What I just described is the anonymous sign-ins feature, just implemented differently.
|
||||
|
||||
Therefore, if the project has a security issue _before_ enabling anonymous sign-ins, they will continue to have the same security issue. If they don't have one, they will continue to not have one after enabling.
|
||||
|
||||
So what's the deal then? Anonymous sign-ins (like the temp-mail case above) can be slightly easier to abuse with bots and scripts than OAuth sign-in methods. For this reason turn on CAPTCHA which ensures to some degree of certainty that it's humans and not scripts behind the sign-ins.
|
||||
|
||||
Supabase Auth exposes the `is_anonymous` claim in the user's JWT if that user comes from a `supabase.auth.signInAnonymously()` call. This is mainly a _restrictive_ feature. Let's look at some examples of how it can be used.
|
||||
|
||||
Suppose your project uses only Google and Apple sign-in, which to a high degree of certainty enforce "humanness." (You have to give up your address, real phone number, they track your phone etc.) But, you're faced with an adoption problem. Folks are not signing up because they want to try out the product first without giving their user data.
|
||||
|
||||
One way to solve this is to add "demo mode" in your project. This will allow users to try out the real product without committing their personal data or money ahead. But this poses an issue for you -- you don't want "spam" on your platform, and you certainly don't want all features to be available without having some way to identify the human behind the actions.
|
||||
|
||||
To put this in practical terms, let's imagine you're building a blogging platform.
|
||||
|
||||
Users that are _quite human_ (like coming from Google or Apple) get all features. This is because you could, eventually, sue them for harassment, pornography, etc.
|
||||
|
||||
But users in "demo mode" don't get all features. Maybe they can only:
|
||||
|
||||
- Write blog posts as drafts, but not publish them. (Goal: Users should experience the great writing experience.)
|
||||
- This can be achieved by, for example, restricting `INSERT` and `UPDATE` on the `public.posts` table when `auth.jwt()->>'is_anonymous'` is true.
|
||||
- Don't allow users to attach pictures to posts. (Goal: Storage costs money, and you don't want anonymous users to upload inappropriate content.)
|
||||
- Use [Storage RLS](https://supabase.com/docs/guides/storage/security/access-control) policies to restrict uploads when `is_anonymous` is true.
|
||||
- Prevent anonymous users from commenting on the blogging platform, but being able to read all comments. (Goal: Keep an orderly platform.)
|
||||
- This can be achieved by restricting `INSERT` on the `public.comments` table but allow `SELECT` when `is_anonymous` is true.
|
||||
|
||||
Resources:
|
||||
|
||||
- Postgres RLS policies (permissive vs restrictive): https://www.postgresql.org/docs/current/ddl-rowsecurity.html
|
||||
- Supabase Anonymous sign-ins docs: https://supabase.com/docs/guides/auth/auth-anonymous
|
||||
- Storage access control (RLS): https://supabase.com/docs/guides/storage/security/access-control
|
||||
+1
-1
@@ -14,6 +14,6 @@ message = "FATAL: no pg_hba.conf entry for host"
|
||||
|
||||
The authentication failed because the user/password credentials were invalid: `(user "xxxx", database "yyyy")`. This could happen if you're trying to connect to the database using wrong or revoked credentials. These errors indicate a failed login attempt was made to your database, meaning the connection wasn't established.
|
||||
|
||||
It is common to see failed connection attempts that use default usernames (such as `user "pgbouncer"`, `database "postgres"`). Being on the public internet means some level of unauthorized access attempts are possible. These are very unsophisticated attempts that usually involve trying combinations like root, psql, test and postgres usernames.
|
||||
It is common to see failed connection attempts that use default usernames (such as `user "pgbouncer"`, `database "postgres"`). Being on the public internet means some level of unauthorized access attempts are possible. These are very unsophisticated attempts that usually involve trying combinations like root, psql, test and Postgres usernames.
|
||||
|
||||
Supabase takes security seriously and works diligently to ensure the safety of your data.
|
||||
|
||||
+1
-1
@@ -24,7 +24,7 @@ Here are your options if your server platform doesn't support IPv6:
|
||||
|
||||
- Use the Supavisor Connection String (available in the [Dashboard](https://supabase.com/dashboard/project/_/settings/database)).
|
||||
- Use the [Supabase Client libraries](https://supabase.com/docs/guides/api/rest/client-libs), which are IPv4 compatible.
|
||||
- Enable the [dedicated IPv4 Add-On](https://supabase.com/dashboard/project/_/settings/addons) (available to Pro and above orgs)
|
||||
- Enable the [dedicated IPv4 Add-On](https://supabase.com/dashboard/project/_/settings/addons) (available to Pro and above organizations)
|
||||
|
||||
> Note: the IPv4 Add-On costs $0.0055 an hour, which equates to ~$4.00 if left on for a full month (~720 hours)
|
||||
|
||||
|
||||
+3
-3
@@ -21,7 +21,7 @@ A single application server can create multiple connections. A client connection
|
||||
|
||||
This represents a direct connection to the database.
|
||||
|
||||
Your application servers can directly connect using the db connection string:
|
||||
Your application servers can directly connect using the DB connection string:
|
||||
|
||||
```md
|
||||
postgresql://postgres:[PASSWORD]@db.[PROJECT REF].supabase.co:5432/postgres
|
||||
@@ -31,7 +31,7 @@ When connecting through the pooler, it will establish db/direct connections on b
|
||||
|
||||
## Max_connections:
|
||||
|
||||
Configured with the max_connections system variable, it represents how many direct/database connections postgres will tolerate. You can view your instance's settings by running this SQL:
|
||||
Configured with the max_connections system variable, it represents how many direct/database connections Postgres will tolerate. You can view your instance's settings by running this SQL:
|
||||
|
||||
```sql
|
||||
SHOW max_connections;
|
||||
@@ -52,7 +52,7 @@ Transaction mode gives the pooler permission to share direct connections among m
|
||||
postgres://postgres.obfwhevidiamwdwki:[YPASSWORD]@aws-0-ca-central-1.pooler.supabase.com:**6543**/postgres
|
||||
```
|
||||
|
||||
Postgres connections use the Postgres Wire Protocol (PWP) rather than HTTP. PWP acts like a websocket: once a connection is made, it stays open and active until the client disconnects.
|
||||
Postgres connections use the Postgres Wire Protocol (PWP) rather than HTTP. PWP acts like a WebSocket: once a connection is made, it stays open and active until the client disconnects.
|
||||
|
||||
If too many connections are held by idle or greedy clients, other application servers won’t be able to connect to your database. Transaction mode helps avoid this problem by allowing clients to access the database connections only when they are running a query. This reduces the chances of hitting the maximum direct connection limit.
|
||||
|
||||
|
||||
@@ -74,7 +74,7 @@ This behavior mirrors a direct connection, allowing greedy clients to monopolize
|
||||
|
||||
Depending on your application's configurations, having the pooler manage a queue of patient clients is preferable to the alternative of constantly polling the database to check for an available connection. Session mode can queue clients for up to a minute. If this isn't particularly relevant to your application design, then the primary benefit is that it is [IPv4 compatible](https://github.com/orgs/supabase/discussions/27034). Also, unlike transaction mode, it supports prepared statements.
|
||||
|
||||
### What happens when a client library, such as prisma, connects through Supavisor?
|
||||
### What happens when a client library, such as Prisma, connects through Supavisor?
|
||||
|
||||
When clients connect to either PostgreSQL or Supavisor, they do so with the PostgreSQL Wire Protocol. Because of this, clients treat connections with the pooler as if they were directly connected to PostgreSQL. The pooler then smoothly acts as a messenger between the database and the client.
|
||||
|
||||
@@ -94,14 +94,14 @@ Now, imagine the tournament organizers decide to expand the venue to house 200 p
|
||||
|
||||
### What is the "user+db+mode" combination?
|
||||
|
||||
PostgreSQL is not actually a database. It is a Relational Database Management System (RDMS). Within it, you can spawn PostgreSQL databases. In Supabase, it is a common pattern to just use the default database called postgres, but you could create more:
|
||||
PostgreSQL is not actually a database. It is a Relational Database Management System (RDBMS). Within it, you can spawn PostgreSQL databases. In Supabase, it is a common pattern to just use the default database called `postgres`, but you could create more:
|
||||
|
||||
```sql
|
||||
CREATE DATABASE postgres;
|
||||
CREATE DATABASE another_database;
|
||||
```
|
||||
|
||||
Similarly, a database can have many database users, but most people just rely on the default user "postgres".
|
||||
Similarly, a database can have many database users, but most people just rely on the default user `postgres`.
|
||||
|
||||
```sql
|
||||
CREATE USER postgres WITH PASSWORD 'super-secret-password';
|
||||
@@ -144,7 +144,7 @@ You can also change the pool size for PostgREST's (DB API) internal pooler at th
|
||||
|
||||
Supabase Storage uses Supavisor internally. The other servers that communicate with Postgres (PostgREST, Realtime, and Auth) all rely on internal application poolers.
|
||||
|
||||
Supavisor is primarily intended for users who do not want to rely on the Supase Client libraries and instead prefer to work with external ORMs, such as Prisma, Drizzle, and Psycopg.
|
||||
Supavisor is primarily intended for users who do not want to rely on the Supabase Client libraries and instead prefer to work with external ORMs, such as Prisma, Drizzle, and Psycopg.
|
||||
|
||||
### **Should I change Supavisor's pool size?**
|
||||
|
||||
|
||||
+1
-1
@@ -142,7 +142,7 @@ On top of that, you have to multiply the cost and the time with the number of
|
||||
| **Index Only Scan** | Retrieves all needed data from the index itself, without visiting the table. |
|
||||
| **Bitmap Heap Scan** | Uses a bitmap of row locations to efficiently retrieve rows from the table. |
|
||||
| **Bitmap Index Scan** | Builds a bitmap by scanning the index to efficiently locate rows. |
|
||||
| **Tid Scan** | Fetches rows directly using tuple identifiers, used in sub-selects. |
|
||||
| **`Tid` Scan** | Fetches rows directly using tuple identifiers, used in sub-selects. |
|
||||
| **Nested Loop** | Joins two tables by scanning the first and then the second for each row. |
|
||||
| **Merge Join** | Joins two pre-sorted tables, efficient for large datasets. |
|
||||
| **Hash Join** | Uses a hash table to perform joins, often faster for larger datasets. |
|
||||
|
||||
+2
-2
@@ -28,9 +28,9 @@ I'm also gonna include some screenshots below on what those steps look like:
|
||||
|
||||
A lot of this was figured out through trial and error - I don't have explanations as to why only port 465 works with `smtp-relay.gmail.com` and/or why the TLS encryption option has no effect on SMTP.
|
||||
|
||||
As to whether this is a Supabase Auth bug, i highly doubt that it is because we use the [native golang smtp library ](https://pkg.go.dev/net/smtp). If anyone still has issues getting google SMTP to work, i would recommend to switch to one of these SMTP providers:
|
||||
As to whether this is a Supabase Auth bug, I highly doubt that it is because we use the [native Golang SMTP library](https://pkg.go.dev/net/smtp). If anyone still has issues getting Google SMTP to work, I would recommend switching to one of these SMTP providers:
|
||||
|
||||
- [Resend](https://resend.com/blog/how-to-configure-supabase-to-send-emails-from-your-domain)
|
||||
- [Sendgrid](https://sendgrid.com/en-us)
|
||||
- [SendGrid](https://sendgrid.com/en-us)
|
||||
- [Mailgun](https://www.mailgun.com/)
|
||||
- AWS SES SMTP
|
||||
|
||||
@@ -22,7 +22,7 @@ It is recommended that you connect with the pooler in transaction mode (port 654
|
||||
postgres://[db-user].[project-ref]:[db-password]@aws-0-[aws-region].pooler.supabase.com:6543
|
||||
```
|
||||
|
||||
When using transaction mode, you should use the[ NullPool setting:](https://docs.sqlalchemy.org/en/20/core/pooling.html#switching-pool-implementations)
|
||||
When using transaction mode, you should use the [`NullPool` setting](https://docs.sqlalchemy.org/en/20/core/pooling.html#switching-pool-implementations):
|
||||
|
||||
```py
|
||||
from sqlalchemy.pool import NullPool
|
||||
|
||||
@@ -12,7 +12,7 @@ database_id = "a5524182-9dc4-4470-86fe-45ace109a53a"
|
||||
**Debugging Steps**
|
||||
|
||||
**1. Test if webhooks are active:**
|
||||
Webhooks are run in a Postgresql background worker. The first debugging step is to see if the worker is active. Run the following SQL code:
|
||||
Webhooks are run in a Postgres background worker. The first debugging step is to see if the worker is active. Run the following SQL code:
|
||||
|
||||
```sql
|
||||
select pid from pg_stat_activity where backend_type ilike '%pg_net%';
|
||||
@@ -85,4 +85,4 @@ If none of the above suggestions uncovered the cause of the error, for debugging
|
||||
|
||||
**Conclusion:**
|
||||
|
||||
If your issue still persists, please document it as an issue in the [PG_NET Github Repo](https://github.com/supabase/pg_net/issues). You are also welcome to contact Supabase Support from your project's [Dashboard](https://supabase.com/dashboard/project/_/) for more guidance.
|
||||
If your issue still persists, please document it as an issue in the [PG_NET GitHub Repo](https://github.com/supabase/pg_net/issues). You are also welcome to contact Supabase Support from your project's [Dashboard](https://supabase.com/dashboard/project/_/) for more guidance.
|
||||
|
||||
+1
-1
@@ -8,7 +8,7 @@ database_id = "d4b45e90-9c52-4781-8cc2-d936e8f4edeb"
|
||||
|
||||
Also, why am I getting redirected to localhost instead of my production site URL?
|
||||
|
||||
In order for the provided redirectTo option to work you must set the exact URL in the shown Redirect URL's setting.
|
||||
In order for the provided `redirectTo` option to work you must set the exact URL in the shown Redirect URLs setting.
|
||||
|
||||

|
||||
|
||||
|
||||
+1
-1
@@ -25,7 +25,7 @@ If you don't know the name of your sequence, it's often formed based on a standa
|
||||
1. Rollbacks
|
||||
One of the most common reasons for a gap is the rollback of a transaction. If you initiate a transaction that includes an insert operation, the sequence responsible for generating the ID for the new row increments. If, for any reason, the transaction doesn't complete successfully—perhaps due to a constraint violation or a deliberate decision to rollback—the insert operation is undone, but the sequence value used is not returned or reused. The documentation explains that as well:
|
||||
|
||||
> To avoid blocking concurrent transactions that obtain numbers from the same sequence, a nextval operation is never rolled back; that is, once a value has been fetched it is considered used and will not be returned again. This is true even if the surrounding transaction later aborts, or if the calling query ends up not using the value. For example an INSERT with an ON CONFLICT clause will compute the to-be-inserted tuple, including doing any required nextval calls, before detecting any conflict that would cause it to follow the ON CONFLICT rule instead. Such cases will leave unused “holes” in the sequence of assigned values.
|
||||
> To avoid blocking concurrent transactions that obtain numbers from the same sequence, a `nextval` operation is never rolled back; that is, once a value has been fetched it is considered used and will not be returned again. This is true even if the surrounding transaction later aborts, or if the calling query ends up not using the value. For example an INSERT with an ON CONFLICT clause will compute the to-be-inserted tuple, including doing any required `nextval` calls, before detecting any conflict that would cause it to follow the ON CONFLICT rule instead. Such cases will leave unused “holes” in the sequence of assigned values.
|
||||
|
||||
2. Deletions
|
||||
Removing rows from your table also creates gaps in the sequence of primary key values. Although deletions do not affect the sequence directly, they contribute to the non-sequential appearance of IDs in your table.
|
||||
|
||||
+1
-1
@@ -7,7 +7,7 @@ keywords = [ "dashboard", "health", "endpoints" ]
|
||||
database_id = "f85935d8-e871-4463-8288-f118f4e24afd"
|
||||
---
|
||||
|
||||
The dashboard makes requests to the health endpoints of the supabase services (Database, Auth, Data API, Realtime, Edge Functions to ensure everything is working). These requests appear in the charts about your project:
|
||||
The dashboard makes requests to the health endpoints of the Supabase services (Database, Auth, Data API, Realtime, Edge Functions to ensure everything is working). These requests appear in the charts about your project:
|
||||
|
||||
<img
|
||||
width="1135"
|
||||
|
||||
+2
-2
@@ -11,12 +11,12 @@ Usually this means you have RLS (row level security) enabled and no policy, or d
|
||||
|
||||
If you have RLS enabled you can test quickly by disabling RLS on the table. If your query works then you have no policies or do not meet them.
|
||||
|
||||
If you have policies that depend on having a signed in user (TO set to `authenticated` or using `auth.uid()`) then you can check by setting TO as `anon` and setting your policy to just `TRUE`. If that works then you don't have a signed in user (with a jwt in the authorization header) when you make the call.
|
||||
If you have policies that depend on having a signed in user (TO set to `authenticated` or using `auth.uid()`) then you can check by setting TO as `anon` and setting your policy to just `TRUE`. If that works then you don't have a signed in user (with a JWT in the authorization header) when you make the call.
|
||||
|
||||
For more information on RLS please see https://supabase.com/docs/guides/auth/row-level-security
|
||||
|
||||
Adding:
|
||||
To test if you have a user session at time of your call you can add this function with the SQL editor and an rpc call in your code at same place as your current database call.
|
||||
To test if you have a user session at time of your call you can add this function with the SQL editor and an RPC call in your code at same place as your current database call.
|
||||
|
||||
```sql
|
||||
create function test_authorization_header() returns json
|
||||
|
||||
+6
-6
@@ -7,14 +7,14 @@ keywords = [ "rls", "service_role", "authorization", "session", "apikey" ]
|
||||
database_id = "677f0a69-e454-4718-ad92-91053d40c085"
|
||||
---
|
||||
|
||||
A Supabase client with the Authorization header set to the service role apikey will ALWAYS bypass RLS. By default the Authorization header is the apikey used in createClient. If you are getting an RLS error then you have a user session getting into the client or you initialized with the anon key. RLS in enforced based on the Authorization header and not the apikey header.
|
||||
A Supabase client with the Authorization header set to the service role API key will ALWAYS bypass RLS. By default the Authorization header is the `apikey` used in `createClient`. If you are getting an RLS error then you have a user session getting into the client or you initialized with the anon key. RLS is enforced based on the `Authorization` header and not the `apikey` header.
|
||||
|
||||
Three common cases of the createClient apikey being replaced by a user session/token:
|
||||
Three common cases of the `createClient` `apikey` being replaced by a user session/token:
|
||||
|
||||
1. SSR client initialized with service role. The SSR clients are designed to share the user session from cookies. The user session will override the default apikey from createClient in the Authorization header. If you are using SSR, always create a separate server client using supabase-js directly for service role.
|
||||
1. SSR client initialized with service role. The SSR clients are designed to share the user session from cookies. The user session will override the default `apikey` from `createClient` in the `Authorization` header. If you are using SSR, always create a separate server client using supabase-js directly for service role.
|
||||
|
||||
2. Edge functions or other server code setting the Authorization header in createClient options directly to a user token/jwt. When you set the Authorization header directly that overrides the default action of using the apikey for the Authorization header.
|
||||
2. Edge functions or other server code setting the `Authorization` header in `createClient` options directly to a user token/JWT. When you set the `Authorization` header directly that overrides the default action of using the `apikey` for the `Authorization` header.
|
||||
|
||||
3. Server client initialized with service role using signUp to create a user or other auth functions. Many auth functions will return a user session to the client making the call. When that happens the apikey will be replaced by the user token/jwt in the Authorization header. If you are wanting to create a user in a service role client use admin.createUser() instead. Otherwise use a separate Supabase client for for service role from other actions.
|
||||
3. Server client initialized with service role using `signUp` to create a user or other auth functions. Many auth functions will return a user session to the client making the call. When that happens the `apikey` will be replaced by the user token/JWT in the `Authorization` header. If you want to create a user in a service role client use `admin.createUser()` instead. Otherwise use a separate Supabase client for service role from other actions.
|
||||
|
||||
Also note that adding service_role in RLS policies does nothing. Service role will never run the policies to begin with.
|
||||
Also note that adding `service_role` in RLS policies does nothing. Service role will never run the policies to begin with.
|
||||
|
||||
+1
-1
@@ -7,7 +7,7 @@ keywords = [ "deadlock", "hang", "async" ]
|
||||
database_id = "0a6f6d33-7331-4b47-87ec-7ade7a6a5924"
|
||||
---
|
||||
|
||||
There is currently a bug in supabase-js which results in a deadlock if any async API call is made in onAuthStateChange code. If a call is made in the handler then the next Supabase call anywhere using that client will hang and not return.
|
||||
There is currently a bug in supabase-js which results in a deadlock if any async API call is made in `onAuthStateChange` code. If a call is made in the handler then the next Supabase call anywhere using that client will hang and not return.
|
||||
|
||||
For now a workaround is needed until this is fixed.
|
||||
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
"postbuild": "pnpm run build:sitemap",
|
||||
"start": "next start",
|
||||
"lint": "next lint",
|
||||
"lint:mdx": "supa-mdx-lint content/guides --config ../../supa-mdx-lint.config.toml",
|
||||
"lint:mdx": "supa-mdx-lint content --config ../../supa-mdx-lint.config.toml",
|
||||
"typecheck": "tsc --noEmit",
|
||||
"pretest": "pnpm run codegen:examples",
|
||||
"test": "vitest --exclude \"**/*.smoke.test.ts\"",
|
||||
|
||||
@@ -28,19 +28,25 @@ allow_list = [
|
||||
"[Bb]lockchains?",
|
||||
"[Bb]reakpoints?",
|
||||
"[Bb]uilt-ins?",
|
||||
"[Cc]hangelogs?",
|
||||
"[Cc]odebases?",
|
||||
"[Cc]odepaths?",
|
||||
"[Cc]onfigs?",
|
||||
"[Cc]onsecutiveness",
|
||||
"[Cc]ooldowns?",
|
||||
"[Cc]ron",
|
||||
"[Dd]atasets?",
|
||||
"[Dd]atasources?",
|
||||
"[Dd]e facto",
|
||||
"[Dd]enylists?",
|
||||
"[Dd]evs?",
|
||||
"[Dd]iff(s|ing|ed)?",
|
||||
"[Dd]ropdown",
|
||||
"[Ee]nums?",
|
||||
"[Ee]nv",
|
||||
"[Ff]atals",
|
||||
"[Ff]rontend",
|
||||
"[Gg]apless",
|
||||
"[Gg]lobs?",
|
||||
"[Gg]lobstar",
|
||||
"[Gg]zip(s|ped|ping)?",
|
||||
@@ -59,25 +65,31 @@ allow_list = [
|
||||
"[Nn]onces?",
|
||||
"[Nn]ullable",
|
||||
"[Oo]ffboarding",
|
||||
"[Oo]h",
|
||||
"[Oo]nboard(ing)?",
|
||||
"[Oo]vercommit(s|ted|ting)?",
|
||||
"[Pp]arams?",
|
||||
"[Pp]laintext",
|
||||
"[Pp]olyfill(s|ed)?",
|
||||
"[Pp]oolers?",
|
||||
"[Pp]sycopg",
|
||||
"[Qq]uickstarts?",
|
||||
"[Rr]ealtime",
|
||||
"[Rr]ebas(e|ed|es|ing)",
|
||||
"[Rr]eplayability",
|
||||
"[Rr]epos?",
|
||||
"[Rr]esultingly",
|
||||
"[Rr]untimes?",
|
||||
"[Ss]erverless",
|
||||
"[Ss]erverside",
|
||||
"[Ss]itekeys?",
|
||||
"[Ss]tateful",
|
||||
"[Ss]tructs?",
|
||||
"[Ss]ubcommands?",
|
||||
"[Ss]ubdomains?",
|
||||
"[Ss]ubfolders?",
|
||||
"[Ss]ubmodules?",
|
||||
"[Ss]wappiness",
|
||||
"[Tt]odos?",
|
||||
"[Tt]radeoffs?",
|
||||
"[Uu]pserts?",
|
||||
@@ -90,13 +102,16 @@ allow_list = [
|
||||
"Astro",
|
||||
"AsyncStorage",
|
||||
"Authy",
|
||||
"B-tree",
|
||||
"Basejump",
|
||||
"BigQuery",
|
||||
"Bitbucket",
|
||||
"Bitwarden",
|
||||
"Brevo",
|
||||
"CAPTCHA",
|
||||
"Cartes Bancaires",
|
||||
"ChatGPT",
|
||||
"Citus",
|
||||
"ClickHouse",
|
||||
"Clippy",
|
||||
"Cloudflare",
|
||||
@@ -105,6 +120,7 @@ allow_list = [
|
||||
"DBeaver",
|
||||
"Database Functions?",
|
||||
"DataDog",
|
||||
"Deadpool",
|
||||
"DevTools",
|
||||
"DDoS",
|
||||
"Deno",
|
||||
@@ -112,6 +128,7 @@ allow_list = [
|
||||
"Django",
|
||||
"Docker",
|
||||
"Drizzle",
|
||||
"EnterpriseDB",
|
||||
"Entra",
|
||||
"Erlang",
|
||||
"Figma",
|
||||
@@ -124,6 +141,7 @@ allow_list = [
|
||||
"GitHub",
|
||||
"GitLab",
|
||||
"GoTrue",
|
||||
"Golang",
|
||||
"Grafana",
|
||||
"GraphQL",
|
||||
"Groonga",
|
||||
@@ -131,6 +149,7 @@ allow_list = [
|
||||
"Heroku",
|
||||
"Homebrew",
|
||||
"Hono",
|
||||
"Hyperdrive",
|
||||
"HypoPG",
|
||||
"IdP",
|
||||
"ImageMagick",
|
||||
@@ -139,6 +158,7 @@ allow_list = [
|
||||
"Infisical",
|
||||
"IntelliJ",
|
||||
"IntelliSense",
|
||||
"IOWait",
|
||||
"IVFFlat",
|
||||
"Jupyter",
|
||||
"JWTs",
|
||||
@@ -153,7 +173,10 @@ allow_list = [
|
||||
"LinkedIn",
|
||||
"LlamaIndex",
|
||||
"Llamafile",
|
||||
"Logflare",
|
||||
"Lua",
|
||||
"Mailgun",
|
||||
"Mailtrap",
|
||||
"Mansueli",
|
||||
"Metabase",
|
||||
"Mixpeek",
|
||||
@@ -270,7 +293,6 @@ allow_list = [
|
||||
"pgvector",
|
||||
"plpgsql",
|
||||
"psql",
|
||||
"psycopg",
|
||||
"scrypt",
|
||||
"sessionStorage",
|
||||
"stdin",
|
||||
@@ -288,10 +310,13 @@ allow_list = [
|
||||
"supabase-rb",
|
||||
"supabase-swift",
|
||||
"supautils",
|
||||
"tokio",
|
||||
"tsvector",
|
||||
"tvOS",
|
||||
"uBlock Origin",
|
||||
"vecs",
|
||||
"vs",
|
||||
"watchOS",
|
||||
]
|
||||
|
||||
prefixes = ["bi", "pre"]
|
||||
prefixes = ["bi", "over", "pre", "un"]
|
||||
|
||||
Reference in New Issue
Block a user