Check the spelling in our code with codespell
akuzm committed Jan 11, 2024
1 parent 4e7edf3 commit f08dd10
Showing 50 changed files with 107 additions and 92 deletions.
6 changes: 3 additions & 3 deletions .github/gh_matrix_builder.py
@@ -337,9 +337,9 @@ def macos_config(overrides):
tests = set()
break
basename = os.path.basename(f)
- splitted = basename.split(".")
- name = splitted[0]
- ext = splitted[-1]
+ split = basename.split(".")
+ name = split[0]
+ ext = split[-1]
if ext == "out":
# Account for the version number.
tests.add(name)
2 changes: 1 addition & 1 deletion .github/workflows/changelog-check.yaml
@@ -5,7 +5,7 @@ name: Check for changelog entry file
branches:
- main
jobs:
- # Check if the PR creates a sperate file with changelog entry in the
+ # Check if the PR creates a separate file with changelog entry in the
# ".unreleased" folder
#
# This check can be disabled by adding the following line in the PR text
19 changes: 17 additions & 2 deletions .github/workflows/code_style.yaml
@@ -5,6 +5,7 @@ name: Code style
- main
- prerelease_test
pull_request:

jobs:
cmake_checks:
name: Check CMake files
@@ -53,6 +54,20 @@ jobs:
run: |
find . -type f \( -name "*.yaml" -or -name "*.yml" \) -print -exec yamllint {} \+
+  spelling_checks:
+    name: Check spelling
+    runs-on: ubuntu-latest
+    steps:
+      - name: Install prerequisites
+        run: |
+          pip install codespell
+      - name: Checkout source
+        uses: actions/checkout@v3
+      - name: Run codespell
+        run: |
+          find . -type f \( -name "*.c" -or -name "*.h" -or -name "*.yaml" -or -name "*.sh" \) \
+            -exec codespell -L "inh,larg,inout" {} \+
cc_checks:
name: Check code formatting
runs-on: ubuntu-22.04
@@ -100,7 +115,7 @@ jobs:
git diff --exit-code
misc_checks:
- name: Check license, update scripts, git hooks, missing gitignore entries and unecessary template tests
+ name: Check license, update scripts, git hooks, missing gitignore entries and unnecessary template tests
runs-on: ubuntu-22.04
strategy:
fail-fast: false
@@ -124,7 +139,7 @@ jobs:
- name: Check for missing gitignore entries for template test files
if: always()
run: ./scripts/check_missing_gitignore_for_template_tests.sh
- - name: Check for unecessary template test files
+ - name: Check for unnecessary template test files
if: always()
run: ./scripts/check_unecessary_template_tests.sh
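
The new spelling_checks job above is easy to reproduce locally before pushing. A minimal sketch, assuming codespell is installed from PyPI and run from the repository root; -L is codespell's comma-separated ignore list, and "inh", "larg" and "inout" are presumably legitimate identifiers in this codebase (for instance, PostgreSQL's RangeVar has an inh field and JoinExpr a larg field) that would otherwise be flagged as typos:

    # Install codespell and run the same check the CI job performs.
    pip install codespell
    find . -type f \( -name "*.c" -or -name "*.h" -or -name "*.yaml" -or -name "*.sh" \) \
        -exec codespell -L "inh,larg,inout" {} \+

    # codespell can also fix typos in place; -w/--write-changes applies
    # the first suggested correction to each flagged word, e.g.:
    #   codespell -w -L "inh,larg,inout" src/chunk.c

Since -w applies corrections automatically, the result should be reviewed before committing; codespell's dictionary occasionally picks the wrong expansion.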

2 changes: 1 addition & 1 deletion .github/workflows/update-test.yaml
@@ -78,7 +78,7 @@ jobs:
UPDATE_FROM_TAG=${DOWNGRADE_TO}-pg${PG_MAJOR}
export UPDATE_FROM_TAG
# We need to use same libssl version used in the latest official TimescaleDB container images.
- # So we will use the fixed alpine version, this will guarantee that libssl version wont change.
+ # So we will use the fixed alpine version, this will guarantee that libssl version won't change.
PG_IMAGE_TAG="${PG_VERSION}-alpine3.17" scripts/test_downgrade_from_tag.sh
- name: Downgrade diff
2 changes: 1 addition & 1 deletion scripts/check_file_license.sh
@@ -58,7 +58,7 @@ check_file() {
return 0;
;;
(*)
- echo "Unkown flag" ${1}
+ echo "Unknown flag" ${1}
return 1;
esac

2 changes: 1 addition & 1 deletion scripts/githooks/commit_msg.py
@@ -131,7 +131,7 @@ def check_body_limit(self):

def check_body_uses_why(self):
"Rule 7: Use the body to explain what and why vs. how"
- # Not enforcable
+ # Not enforceable
return True

rule_funcs = [
4 changes: 2 additions & 2 deletions scripts/test_downgrade_from_tag.sh
@@ -142,7 +142,7 @@ wait_for_pg() {
if docker_exec $1 "pg_isready -U postgres"
then
# this makes the test less flaky, although not
- # ideal. Apperently, pg_isready is not always a good
+ # ideal. Apparently, pg_isready is not always a good
# indication of whether the DB is actually ready to accept
# queries
sleep 1
@@ -187,7 +187,7 @@ docker_pgcmd ${CONTAINER_ORIG} "CHECKPOINT;"

# We need the previous version shared libraries as well, so we copy
# all shared libraries out from the original container before stopping
- # it. We could limit it to just the preceeding version, but this is
+ # it. We could limit it to just the preceding version, but this is
# more straightforward.
srcdir=$(docker exec ${CONTAINER_ORIG} /bin/bash -c 'pg_config --pkglibdir')
FILES=$(docker exec ${CONTAINER_ORIG} /bin/bash -c "ls $srcdir/timescaledb*.so")
2 changes: 1 addition & 1 deletion scripts/test_repair_from_tag.sh
@@ -88,7 +88,7 @@ wait_for_pg() {

if docker_exec $1 "pg_isready -h localhost -U postgres"; then
# this makes the test less flaky, although not
- # ideal. Apperently, pg_isready is not always a good
+ # ideal. Apparently, pg_isready is not always a good
# indication of whether the DB is actually ready to accept
# queries
sleep 5
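
The retry pattern these wait_for_pg helpers implement, per the comment above, looks roughly like the following sketch (a simplified reconstruction, not the scripts' exact code; docker_exec and the container-name argument are taken from the surrounding context, while the loop bound and failure handling are assumptions):

    wait_for_pg() {
        for i in $(seq 1 30); do
            if docker_exec $1 "pg_isready -h localhost -U postgres"; then
                # pg_isready only checks that the server accepts connections;
                # it can report ready before the DB can actually run queries,
                # so sleep a bit longer to make the test less flaky.
                sleep 5
                return 0
            fi
            sleep 1
        done
        return 1
    }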
2 changes: 1 addition & 1 deletion scripts/test_update_from_tag.sh
@@ -144,7 +144,7 @@ wait_for_pg() {
if docker_exec $1 "pg_isready -U postgres"
then
# this makes the test less flaky, although not
- # ideal. Apperently, pg_isready is not always a good
+ # ideal. Apparently, pg_isready is not always a good
# indication of whether the DB is actually ready to accept
# queries
sleep 1
2 changes: 1 addition & 1 deletion src/cache.c
@@ -50,7 +50,7 @@ ts_cache_init(Cache *cache)

/*
* We always want to be explicit about the memory context our hash table
- * ends up in to ensure it's not accidently put in TopMemoryContext.
+ * ends up in to ensure it's not accidentally put in TopMemoryContext.
*/
Assert(cache->flags & HASH_CONTEXT);
cache->htab = hash_create(cache->name, cache->numelements, &cache->hctl, cache->flags);
14 changes: 7 additions & 7 deletions src/chunk.c
@@ -509,7 +509,7 @@ chunk_collides(const Hypertable *ht, const Hypercube *hc)
}

/*-
- * Resolve collisions and perform alignmment.
+ * Resolve collisions and perform alignment.
*
* Chunks collide only if their hypercubes overlap in all dimensions. For
* instance, the 2D chunks below collide because they overlap in both the X and
@@ -2122,7 +2122,7 @@ ts_chunk_show_chunks(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
* For INTEGER type dimensions, we support querying using intervals or any
- * timetamp or date input. For such INTEGER dimensions, we get the chunks
+ * timestamp or date input. For such INTEGER dimensions, we get the chunks
* using their creation time values.
*/
if (IS_INTEGER_TYPE(time_type) && (arg_type == INTERVALOID || IS_TIMESTAMP_TYPE(arg_type)))
@@ -3497,7 +3497,7 @@ ts_chunk_is_frozen(Chunk *chunk)
}

/* only caller used to be ts_chunk_unset_frozen. This code was in PG14 block as we run into
- * defined but unsed error in CI/CD builds for PG < 14. But now called from recompress as well
+ * a "defined but unused" error in CI/CD builds for PG < 14. But now called from recompress as well
*/
bool
ts_chunk_clear_status(Chunk *chunk, int32 status)
@@ -3857,7 +3857,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
{
/*
* For INTEGER type dimensions, we support querying using intervals or any
- * timetamp or date input. For such INTEGER dimensions, we get the chunks
+ * timestamp or date input. For such INTEGER dimensions, we get the chunks
* using their creation time values.
*/
if (IS_INTEGER_TYPE(time_type) && (arg_type == INTERVALOID || IS_TIMESTAMP_TYPE(arg_type)))
@@ -4206,7 +4206,7 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS)

/*
* For INTEGER type dimensions, we support querying using intervals or any
- * timetamp or date input. For such INTEGER dimensions, we get the chunks
+ * timestamp or date input. For such INTEGER dimensions, we get the chunks
* using their creation time values.
*/
if (IS_INTEGER_TYPE(time_type) && (arg_type == INTERVALOID || IS_TIMESTAMP_TYPE(arg_type)))
@@ -4645,7 +4645,7 @@ add_foreign_table_as_chunk(Oid relid, Hypertable *parent_ht)
chunk->relkind,
chunk->hypertable_relid);
chunk_create_table_constraints(parent_ht, chunk);
- /* Add dimension constriants for the chunk */
+ /* Add dimension constraints for the chunk */
ts_chunk_constraints_add_dimension_constraints(chunk->constraints, chunk->fd.id, chunk->cube);
ts_chunk_constraints_insert_metadata(chunk->constraints);
chunk_add_inheritance(chunk, parent_ht);
@@ -4657,7 +4657,7 @@ add_foreign_table_as_chunk(Oid relid, Hypertable *parent_ht)
* Noncontiguous flag should not be set since the chunk should be empty upon
* creation, with an invalid range assigned, so ordered append should be allowed.
* Once the data is moved into the OSM chunk, then our catalog should be
- * udpated with proper API calls from the OSM extension.
+ * updated with proper API calls from the OSM extension.
*/
parent_ht->fd.status =
ts_set_flags_32(parent_ht->fd.status,
2 changes: 1 addition & 1 deletion src/chunk_constraint.c
@@ -913,7 +913,7 @@ chunk_constraint_delete_metadata(TupleInfo *ti)

/*
* If this is an index constraint, we need to cleanup the index
- * metadata. Don't drop the index though, since that will happend when
+ * metadata. Don't drop the index though, since that will happen when
* the constraint is dropped.
*/
if (OidIsValid(index_relid))
2 changes: 1 addition & 1 deletion src/compat/compat.h
@@ -535,7 +535,7 @@ get_reindex_options(ReindexStmt *stmt)
/*
* PG15 added additional `force_flush` argument to shm_mq_send().
*
- * Our _compat() version currently uses force_flush = true on PG15 to preseve
+ * Our _compat() version currently uses force_flush = true on PG15 to preserve
* the same behaviour on all supported PostgreSQL versions.
*
* https://git.postgresql.org/gitweb/?p=postgresql.git;a=commit;h=46846433
2 changes: 1 addition & 1 deletion src/constraint.h
@@ -11,7 +11,7 @@
#include "export.h"

/*
- * Return status for constraint processsing function.
+ * Return status for constraint processing function.
*
* PROCESSED - count the constraint as processed
* IGNORED - the constraint wasn't processed
2 changes: 1 addition & 1 deletion src/hypertable.c
@@ -1497,7 +1497,7 @@ ts_hypertable_create_internal(FunctionCallInfo fcinfo, Oid table_relid,
ts_cache_release(hcache);

/*
- * Validate create_hypertable arguments and use defaults accoring to the
+ * Validate create_hypertable arguments and use defaults according to the
* hypertable_distributed_default guc.
*
* Validate data nodes and check permissions on them if this is a
2 changes: 1 addition & 1 deletion src/nodes/chunk_append/chunk_append.c
@@ -396,7 +396,7 @@ ts_chunk_append_path_create(PlannerInfo *root, RelOptInfo *rel, Hypertable *ht,
Assert(flat == NULL);

/*
- * if we do not have scans as direct childs of this
+ * if we do not have scans as direct children of this
* node we disable startup and runtime exclusion
* in this node
*/
2 changes: 1 addition & 1 deletion src/nodes/chunk_dispatch/chunk_insert_state.c
@@ -367,7 +367,7 @@ setup_on_conflict_state(ChunkInsertState *state, ChunkDispatch *dispatch,

/*
* If the chunk's tuple descriptor matches exactly the hypertable
- * (the common case), we can re-use most of the parent's ON
+ * (the common case), we can reuse most of the parent's ON
* CONFLICT SET state, skipping a bunch of work. Otherwise, we
* need to create state specific to this partition.
*/
@@ -198,7 +198,7 @@ ca_append_begin(CustomScanState *node, EState *estate, int eflags)

/*
* clauses should always have the same length as appendplans because
- * thats the base for building the lists
+ * that's the base for building the lists
*/
Assert(list_length(old_appendplans) == list_length(chunk_ri_clauses));
Assert(list_length(chunk_relids) == list_length(chunk_ri_clauses));
2 changes: 1 addition & 1 deletion src/partitioning.c
@@ -258,7 +258,7 @@ ts_partitioning_func_apply(PartitioningInfo *pinfo, Oid collation, Datum value)
* Helper function to find the right partition value from a tuple,
* for space partitioned hypertables. Since attributes in tuple can
* be of different order when compared to physical table columns order,
- * we pass partition_col_idx which points to correct space parititioned
+ * we pass partition_col_idx which points to correct space partitioned
* column in the given tuple.
*/
TSDLLEXPORT Datum
2 changes: 1 addition & 1 deletion src/planner/expand_hypertable.c
@@ -1319,7 +1319,7 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
#if PG16_LT
childrte->requiredPerms = 0;
#else
- /* Since PG16, the permission info is maintained separetely. Unlink
+ /* Since PG16, the permission info is maintained separately. Unlink
* the old perminfo from the RTE to disable permission checking.
*/
childrte->perminfoindex = 0;
2 changes: 1 addition & 1 deletion src/planner/planner.c
@@ -846,7 +846,7 @@ should_chunk_append(Hypertable *ht, PlannerInfo *root, RelOptInfo *rel, Path *pa
* Even though ordered is true on the RelOptInfo we have to
* double check that current Path fulfills requirements for
* Ordered Append transformation because the RelOptInfo may
- * be used for multiple Pathes.
+ * be used for multiple Paths.
*/
Expr *em_expr = find_em_expr_for_rel(pk->pk_eclass, rel);

8 changes: 4 additions & 4 deletions src/process_utility.c
@@ -2447,7 +2447,7 @@ process_index_chunk_multitransaction(int32 hypertable_id, Oid chunk_relid, void
* We grab a ShareLock on the chunk, because that's what CREATE INDEX
* does. For the hypertable's index, we are ok using the weaker
* AccessShareLock, since we only need to prevent the index itself from
- * being ALTERed or DROPed during this part of index creation.
+ * being ALTERed or DROPped during this part of index creation.
*/
chunk_rel = table_open(chunk_relid, ShareLock);
chunk = ts_chunk_get_by_relid(chunk_relid, true);
@@ -2573,7 +2573,7 @@ process_index_start(ProcessUtilityArgs *args)
ts_cache_release(hcache);
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("operation not supported on continuous aggreates that are not "
+ errmsg("operation not supported on continuous aggregates that are not "
"finalized"),
errhint("Recreate the continuous aggregate to allow index creation.")));
}
@@ -2722,7 +2722,7 @@ process_index_start(ProcessUtilityArgs *args)
* Lock the index for the remainder of the command. Since we're using
* multiple transactions for index creation, a regular
* transaction-level lock won't prevent the index from being
- * concurrently ALTERed or DELETEed. Instead, we grab a session level
+ * concurrently ALTERed or DELETEd. Instead, we grab a session level
* lock on the index, which we'll release when the command is
* finished. (This is the same strategy postgres uses in CREATE INDEX
* CONCURRENTLY)
@@ -4423,7 +4423,7 @@ process_ddl_event_sql_drop(EventTriggerData *trigdata)
TS_FUNCTION_INFO_V1(ts_timescaledb_process_ddl_event);

/*
- * Event trigger hook for DDL commands that have alread been handled by
+ * Event trigger hook for DDL commands that have already been handled by
* PostgreSQL (i.e., "ddl_command_end" and "sql_drop" events).
*/
Datum
4 changes: 2 additions & 2 deletions src/telemetry/functions.c
@@ -42,7 +42,7 @@ typedef struct AllowedFnHashEntry
} AllowedFnHashEntry;

// Get a HTAB of AllowedFnHashEntrys containing all and only those functions
- // that are withing visible_extensions. This function should be equivalent to
+ // that are within visible_extensions. This function should be equivalent to
// the SQL
// SELECT objid
// FROM pg_catalog.pg_depend, pg_catalog.pg_extension extension
@@ -222,7 +222,7 @@ ts_function_telemetry_read(const char **visible_extensions, int num_visible_exte
* This function resets the shared function counts after we send back telemetry
* in preparation for the next recording cycle. Note that there is no way to
* atomically read and reset the counts in the shared hashmap, so writes that
- * occur between sending the old counts and reseting for the next cycle will be
+ * occur between sending the old counts and resetting for the next cycle will be
* lost. Since this this telemetry is only ever an approximation of reality, we
* believe this loss is acceptable considering that the alternatives are
* resetting the counts whenever the telemetry is read (potentially even more
2 changes: 1 addition & 1 deletion src/ts_catalog/continuous_aggs_watermark.c
@@ -250,7 +250,7 @@ TS_FUNCTION_INFO_V1(ts_continuous_agg_watermark_materialized);
*
* The difference between this function and `ts_continuous_agg_watermark` is
* that this one get the max open dimension of the materialization hypertable
- * insted of get the stored value in the catalog table.
+ * instead of get the stored value in the catalog table.
*/
Datum
ts_continuous_agg_watermark_materialized(PG_FUNCTION_ARGS)
2 changes: 1 addition & 1 deletion test/runner.sh
@@ -59,7 +59,7 @@ EOF
trap cleanup EXIT

# setup clusterwide settings on first run
- # we use mkdir here because it is an atomic operation unlike existance of a lockfile
+ # we use mkdir here because it is an atomic operation unlike existence of a lockfile
# where creating and checking are 2 separate operations
if mkdir ${TEST_OUTPUT_DIR}/.pg_init 2>/dev/null; then
${PSQL} "$@" -U ${USER} -d template1 -v ECHO=none >/dev/null 2>&1 <<EOF
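
The mkdir idiom in this comment works because mkdir both checks for and creates the directory in one atomic operation, so when several test runners race, exactly one succeeds; a lockfile approach (test -e followed by touch) leaves a window between the check and the creation. A generic sketch of the pattern, with a hypothetical lock path rather than the script's:

    LOCKDIR="/tmp/onetime_setup.lock"   # hypothetical path, not the script's

    if mkdir "${LOCKDIR}" 2>/dev/null; then
        # We won the race: run the one-time setup exactly once.
        echo "running cluster-wide setup"
    else
        # Another runner already created the directory.
        echo "setup already done or in progress, skipping"
    fi

Unlike a general-purpose lock, the test runner intentionally never removes the directory, so the setup block executes only once per test output directory rather than once per lock acquisition.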
2 changes: 1 addition & 1 deletion test/runner_shared.sh
@@ -36,7 +36,7 @@ TEST_ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2:-default_perm_user
shift

# setup clusterwide settings on first run
- # we use mkdir here because it is an atomic operation unlike existance of a lockfile
+ # we use mkdir here because it is an atomic operation unlike existence of a lockfile
# where creating and checking are 2 separate operations
if mkdir ${TEST_OUTPUT_DIR}/.pg_init 2>/dev/null; then
${PSQL} "$@" -U ${USER} -d postgres -v ECHO=none -c "ALTER USER ${TEST_ROLE_SUPERUSER} WITH SUPERUSER;" >/dev/null
2 changes: 1 addition & 1 deletion test/src/bgw/log.c
@@ -47,7 +47,7 @@ bgw_log_insert_relation(Relation rel, char *msg)

/* Insert a new entry into public.bgw_log
* This table is used for testing as a way for mock background jobs
- * to insert messges into a log that could then be output into the golden file
+ * to insert messages into a log that could then be output into the golden file
*/
static void
bgw_log_insert(char *msg)
2 changes: 1 addition & 1 deletion test/src/net/test_http.c
@@ -75,7 +75,7 @@ num_test_strings()
return sizeof(TEST_LENGTHS) / sizeof(TEST_LENGTHS[0]);
}

- /* Check we can succesfully parse partial by well-formed HTTP responses */
+ /* Check we can successfully parse partial by well-formed HTTP responses */
Datum
ts_test_http_parsing(PG_FUNCTION_ARGS)
{