diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yml index dba298a379..552eb56e46 100644 --- a/.github/workflows/build-binaries.yml +++ b/.github/workflows/build-binaries.yml @@ -7,7 +7,6 @@ on: description: "Version number" required: true type: string - defaults: run: working-directory: ./backend @@ -49,9 +48,9 @@ jobs: - name: Package into node binary run: | if [ "${{ matrix.os }}" != "linux" ]; then - pkg --no-bytecode --public-packages "*" --public --target ${{ matrix.target }}-${{ matrix.arch }} --output ./binary/infisical-core-${{ matrix.os }}-${{ matrix.arch }} . + pkg --no-bytecode --public-packages "*" --public --compress GZip --target ${{ matrix.target }}-${{ matrix.arch }} --output ./binary/infisical-core-${{ matrix.os }}-${{ matrix.arch }} . else - pkg --no-bytecode --public-packages "*" --public --target ${{ matrix.target }}-${{ matrix.arch }} --output ./binary/infisical-core . + pkg --no-bytecode --public-packages "*" --public --compress GZip --target ${{ matrix.target }}-${{ matrix.arch }} --output ./binary/infisical-core . 
fi # Set up .deb package structure (Debian/Ubuntu only) @@ -83,6 +82,86 @@ jobs: dpkg-deb --build infisical-core mv infisical-core.deb ./binary/infisical-core-${{matrix.arch}}.deb + ### RPM + + # Set up .rpm package structure + - name: Set up .rpm package structure + if: matrix.os == 'linux' + run: | + mkdir -p infisical-core-rpm/usr/local/bin + cp ./binary/infisical-core infisical-core-rpm/usr/local/bin/ + chmod +x infisical-core-rpm/usr/local/bin/infisical-core + + # Install RPM build tools + - name: Install RPM build tools + if: matrix.os == 'linux' + run: sudo apt-get update && sudo apt-get install -y rpm + + # Create .spec file for RPM + - name: Create .spec file for RPM + if: matrix.os == 'linux' + run: | + cat <<EOF > infisical-core.spec + + %global _enable_debug_package 0 + %global debug_package %{nil} + %global __os_install_post /usr/lib/rpm/brp-compress %{nil} + + Name: infisical-core + Version: ${{ github.event.inputs.version }} + Release: 1%{?dist} + Summary: Infisical Core standalone executable + License: Proprietary + URL: https://app.infisical.com + + %description + Infisical Core standalone executable (app.infisical.com) + + %install + mkdir -p %{buildroot}/usr/local/bin + cp %{_sourcedir}/infisical-core %{buildroot}/usr/local/bin/ + + %files + /usr/local/bin/infisical-core + + %pre + + %post + + %preun + + %postun + EOF + + # Build .rpm file + - name: Build .rpm package + if: matrix.os == 'linux' + run: | + # Create necessary directories + mkdir -p rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS} + + # Copy the binary directly to SOURCES + cp ./binary/infisical-core rpmbuild/SOURCES/ + + # Run rpmbuild with verbose output + rpmbuild -vv -bb \ + --define "_topdir $(pwd)/rpmbuild" \ + --define "_sourcedir $(pwd)/rpmbuild/SOURCES" \ + --define "_rpmdir $(pwd)/rpmbuild/RPMS" \ + --target ${{ matrix.arch == 'x64' && 'x86_64' || 'aarch64' }} \ + infisical-core.spec + + # Try to find the RPM file + find rpmbuild -name "*.rpm" + + # Move the RPM file if found + if [ -n 
"$(find rpmbuild -name '*.rpm')" ]; then + mv $(find rpmbuild -name '*.rpm') ./binary/infisical-core-${{matrix.arch}}.rpm + else + echo "RPM file not found!" + exit 1 + fi + - uses: actions/setup-python@v4 with: python-version: "3.x" # Specify the Python version you need @@ -97,6 +176,12 @@ jobs: working-directory: ./backend run: cloudsmith push deb --republish --no-wait-for-sync --api-key=${{ secrets.CLOUDSMITH_API_KEY }} infisical/infisical-core/any-distro/any-version ./binary/infisical-core-${{ matrix.arch }}.deb + # Publish .rpm file to Cloudsmith (Red Hat-based systems only) + - name: Publish .rpm to Cloudsmith + if: matrix.os == 'linux' + working-directory: ./backend + run: cloudsmith push rpm --republish --no-wait-for-sync --api-key=${{ secrets.CLOUDSMITH_API_KEY }} infisical/infisical-core/any-distro/any-version ./binary/infisical-core-${{ matrix.arch }}.rpm + # Publish .exe file to Cloudsmith (Windows only) - name: Publish to Cloudsmith (Windows) if: matrix.os == 'win' diff --git a/.github/workflows/build-staging-and-deploy-aws.yml b/.github/workflows/build-staging-and-deploy-aws.yml index 347b6f5eed..84f22e3ccc 100644 --- a/.github/workflows/build-staging-and-deploy-aws.yml +++ b/.github/workflows/build-staging-and-deploy-aws.yml @@ -127,6 +127,7 @@ jobs: - name: Change directory to backend and install dependencies env: DB_CONNECTION_URI: ${{ secrets.DB_CONNECTION_URI }} + AUDIT_LOGS_DB_CONNECTION_URI: ${{ secrets.AUDIT_LOGS_DB_CONNECTION_URI }} run: | cd backend npm install diff --git a/backend/package-lock.json b/backend/package-lock.json index 31521fb7d3..847eb2ba66 100644 --- a/backend/package-lock.json +++ b/backend/package-lock.json @@ -61,7 +61,7 @@ "jwks-rsa": "^3.1.0", "knex": "^3.0.1", "ldapjs": "^3.0.7", - "ldif": "^0.5.1", + "ldif": "0.5.1", "libsodium-wrappers": "^0.7.13", "lodash.isequal": "^4.5.0", "mongodb": "^6.8.1", diff --git a/backend/scripts/generate-schema-types.ts b/backend/scripts/generate-schema-types.ts index 
c7b1fcc7a9..fc398c2acf 100644 --- a/backend/scripts/generate-schema-types.ts +++ b/backend/scripts/generate-schema-types.ts @@ -90,7 +90,12 @@ const main = async () => { .whereRaw("table_schema = current_schema()") .select<{ tableName: string }[]>("table_name as tableName") .orderBy("table_name") - ).filter((el) => !el.tableName.includes("_migrations") && !el.tableName.includes("partitioned_audit_logs_")); + ).filter( + (el) => + !el.tableName.includes("_migrations") && + !el.tableName.includes("audit_logs_") && + el.tableName !== "intermediate_audit_logs" + ); for (let i = 0; i < tables.length; i += 1) { const { tableName } = tables[i]; diff --git a/backend/src/@types/knex.d.ts b/backend/src/@types/knex.d.ts index d40a4f1489..6249152762 100644 --- a/backend/src/@types/knex.d.ts +++ b/backend/src/@types/knex.d.ts @@ -170,9 +170,6 @@ import { TOrgRoles, TOrgRolesInsert, TOrgRolesUpdate, - TPartitionedAuditLogs, - TPartitionedAuditLogsInsert, - TPartitionedAuditLogsUpdate, TPkiAlerts, TPkiAlertsInsert, TPkiAlertsUpdate, @@ -718,11 +715,6 @@ declare module "knex/types/tables" { TAuditLogStreamsInsert, TAuditLogStreamsUpdate >; - [TableName.PartitionedAuditLog]: KnexOriginal.CompositeTableType< - TPartitionedAuditLogs, - TPartitionedAuditLogsInsert, - TPartitionedAuditLogsUpdate - >; [TableName.GitAppInstallSession]: KnexOriginal.CompositeTableType< TGitAppInstallSessions, TGitAppInstallSessionsInsert, diff --git a/backend/src/db/manual-migrations/partition-audit-logs.ts b/backend/src/db/manual-migrations/partition-audit-logs.ts new file mode 100644 index 0000000000..382ef0dbff --- /dev/null +++ b/backend/src/db/manual-migrations/partition-audit-logs.ts @@ -0,0 +1,161 @@ +import kx, { Knex } from "knex"; + +import { TableName } from "../schemas"; + +const INTERMEDIATE_AUDIT_LOG_TABLE = "intermediate_audit_logs"; + +const formatPartitionDate = (date: Date) => { + const year = date.getFullYear(); + const month = String(date.getMonth() + 1).padStart(2, "0"); + const day = 
String(date.getDate()).padStart(2, "0"); + + return `${year}-${month}-${day}`; +}; + +const createAuditLogPartition = async (knex: Knex, startDate: Date, endDate: Date) => { + const startDateStr = formatPartitionDate(startDate); + const endDateStr = formatPartitionDate(endDate); + + const partitionName = `${TableName.AuditLog}_${startDateStr.replace(/-/g, "")}_${endDateStr.replace(/-/g, "")}`; + + await knex.schema.raw( + `CREATE TABLE ${partitionName} PARTITION OF ${TableName.AuditLog} FOR VALUES FROM ('${startDateStr}') TO ('${endDateStr}')` + ); +}; + +const up = async (knex: Knex): Promise<void> => { + console.info("Dropping primary key of audit log table..."); + await knex.schema.alterTable(TableName.AuditLog, (t) => { + // remove existing keys + t.dropPrimary(); + }); + + // Get all indices of the audit log table and drop them + const indexNames: { rows: { indexname: string }[] } = await knex.raw( + ` + SELECT indexname + FROM pg_indexes + WHERE tablename = '${TableName.AuditLog}' + ` + ); + + console.log( + "Deleting existing audit log indices:", + indexNames.rows.map((e) => e.indexname) + ); + + for await (const row of indexNames.rows) { + await knex.raw(`DROP INDEX IF EXISTS ${row.indexname}`); + } + + // renaming audit log to intermediate table + console.log("Renaming audit log table to the intermediate name"); + await knex.schema.renameTable(TableName.AuditLog, INTERMEDIATE_AUDIT_LOG_TABLE); + + if (!(await knex.schema.hasTable(TableName.AuditLog))) { + const createTableSql = knex.schema + .createTable(TableName.AuditLog, (t) => { + t.uuid("id").defaultTo(knex.fn.uuid()); + t.string("actor").notNullable(); + t.jsonb("actorMetadata").notNullable(); + t.string("ipAddress"); + t.string("eventType").notNullable(); + t.jsonb("eventMetadata"); + t.string("userAgent"); + t.string("userAgentType"); + t.datetime("expiresAt"); + t.timestamps(true, true, true); + t.uuid("orgId"); + t.string("projectId"); + t.string("projectName"); + t.primary(["id", "createdAt"]); + }) + 
.toString(); + + console.info("Creating partition table..."); + await knex.schema.raw(` + ${createTableSql} PARTITION BY RANGE ("createdAt"); + `); + + console.log("Adding indices..."); + await knex.schema.alterTable(TableName.AuditLog, (t) => { + t.index(["projectId", "createdAt"]); + t.index(["orgId", "createdAt"]); + t.index("expiresAt"); + t.index("orgId"); + t.index("projectId"); + }); + + console.log("Adding GIN indices..."); + + await knex.raw( + `CREATE INDEX IF NOT EXISTS "audit_logs_actorMetadata_idx" ON ${TableName.AuditLog} USING gin("actorMetadata" jsonb_path_ops)` + ); + console.log("GIN index for actorMetadata done"); + + await knex.raw( + `CREATE INDEX IF NOT EXISTS "audit_logs_eventMetadata_idx" ON ${TableName.AuditLog} USING gin("eventMetadata" jsonb_path_ops)` + ); + console.log("GIN index for eventMetadata done"); + + // create default partition + console.log("Creating default partition..."); + await knex.schema.raw(`CREATE TABLE ${TableName.AuditLog}_default PARTITION OF ${TableName.AuditLog} DEFAULT`); + + const nextDate = new Date(); + nextDate.setDate(nextDate.getDate() + 1); + const nextDateStr = formatPartitionDate(nextDate); + + console.log("Attaching existing audit log table as a partition..."); + await knex.schema.raw(` + ALTER TABLE ${INTERMEDIATE_AUDIT_LOG_TABLE} ADD CONSTRAINT audit_log_old + CHECK ( "createdAt" < DATE '${nextDateStr}' ); + + ALTER TABLE ${TableName.AuditLog} ATTACH PARTITION ${INTERMEDIATE_AUDIT_LOG_TABLE} + FOR VALUES FROM (MINVALUE) TO ('${nextDateStr}' ); + `); + + // create partition from now until end of month + console.log("Creating audit log partitions ahead of time... 
next date:", nextDateStr); + await createAuditLogPartition(knex, nextDate, new Date(nextDate.getFullYear(), nextDate.getMonth() + 1)); + + // create partitions 4 years ahead + const partitionMonths = 4 * 12; + const partitionPromises: Promise<void>[] = []; + for (let x = 1; x <= partitionMonths; x += 1) { + partitionPromises.push( + createAuditLogPartition( + knex, + new Date(nextDate.getFullYear(), nextDate.getMonth() + x, 1), + new Date(nextDate.getFullYear(), nextDate.getMonth() + (x + 1), 1) + ) + ); + } + + await Promise.all(partitionPromises); + console.log("Partition migration complete"); + } +}; + +export const executeMigration = async (url: string) => { + console.log("Executing migration..."); + const knex = kx({ + client: "pg", + connection: url + }); + + await knex.transaction(async (tx) => { + await up(tx); + }); +}; + +const dbUrl = process.env.AUDIT_LOGS_DB_CONNECTION_URI; +if (!dbUrl) { + console.error("Please provide a DB connection URL to the AUDIT_LOGS_DB_CONNECTION_URI env"); + process.exit(1); +} + +void executeMigration(dbUrl).then(() => { + console.log("Migration: partition-audit-logs DONE"); + process.exit(0); +}); diff --git a/backend/src/db/migrations/20241007052449_partition-audit-logs.ts b/backend/src/db/migrations/20241007052449_partition-audit-logs.ts deleted file mode 100644 index e132bb5efa..0000000000 --- a/backend/src/db/migrations/20241007052449_partition-audit-logs.ts +++ /dev/null @@ -1,164 +0,0 @@ -import { Knex } from "knex"; - -import { TableName } from "../schemas"; - -const formatPartitionDate = (date: Date) => { - const year = date.getFullYear(); - const month = String(date.getMonth() + 1).padStart(2, "0"); - const day = String(date.getDate()).padStart(2, "0"); - - return `${year}-${month}-${day}`; -}; - -const createAuditLogPartition = async (knex: Knex, startDate: Date, endDate: Date) => { - const startDateStr = formatPartitionDate(startDate); - const endDateStr = formatPartitionDate(endDate); - - const partitionName = 
`${TableName.PartitionedAuditLog}_${startDateStr.replace(/-/g, "")}_${endDateStr.replace( - /-/g, - "" - )}`; - - await knex.schema.raw( - `CREATE TABLE ${partitionName} PARTITION OF ${TableName.PartitionedAuditLog} FOR VALUES FROM ('${startDateStr}') TO ('${endDateStr}')` - ); -}; - -const isUsingDedicatedAuditLogDb = Boolean(process.env.AUDIT_LOGS_DB_CONNECTION_URI); - -export async function up(knex: Knex): Promise { - if (!isUsingDedicatedAuditLogDb && (await knex.schema.hasTable(TableName.AuditLog))) { - console.info("Dropping primary key of Audit Log table..."); - await knex.schema.alterTable(TableName.AuditLog, (t) => { - // remove existing keys - t.dropPrimary(); - }); - } - - // create a new partitioned table for audit logs - if (!(await knex.schema.hasTable(TableName.PartitionedAuditLog))) { - const createTableSql = knex.schema - .createTable(TableName.PartitionedAuditLog, (t) => { - t.uuid("id").defaultTo(knex.fn.uuid()); - t.string("actor").notNullable(); - t.jsonb("actorMetadata").notNullable(); - t.string("ipAddress"); - t.string("eventType").notNullable(); - t.jsonb("eventMetadata"); - t.string("userAgent"); - t.string("userAgentType"); - t.datetime("expiresAt"); - t.timestamps(true, true, true); - t.uuid("orgId"); - t.string("projectId"); - t.string("projectName"); - t.primary(["id", "createdAt"]); - }) - .toString(); - - console.info("Creating partition table..."); - await knex.schema.raw(` - ${createTableSql} PARTITION BY RANGE ("createdAt"); - `); - - console.log("Adding indices..."); - await knex.schema.alterTable(TableName.PartitionedAuditLog, (t) => { - t.index(["projectId", "createdAt"]); - t.index(["orgId", "createdAt"]); - t.index("expiresAt"); - t.index("orgId"); - t.index("projectId"); - }); - - console.log("Adding GIN indices..."); - - await knex.raw( - `CREATE INDEX IF NOT EXISTS "audit_logs_actorMetadata_idx" ON ${TableName.PartitionedAuditLog} USING gin("actorMetadata" jsonb_path_ops)` - ); - console.log("GIN index for actorMetadata 
done"); - - await knex.raw( - `CREATE INDEX IF NOT EXISTS "audit_logs_eventMetadata_idx" ON ${TableName.PartitionedAuditLog} USING gin("eventMetadata" jsonb_path_ops)` - ); - console.log("GIN index for eventMetadata done"); - - // create default partition - console.log("Creating default partition..."); - await knex.schema.raw( - `CREATE TABLE ${TableName.PartitionedAuditLog}_default PARTITION OF ${TableName.PartitionedAuditLog} DEFAULT` - ); - - const nextDate = new Date(); - nextDate.setDate(nextDate.getDate() + 1); - const nextDateStr = formatPartitionDate(nextDate); - - // attach existing audit log table as a partition ONLY if using the same DB - if (!isUsingDedicatedAuditLogDb) { - console.log("Attaching existing audit log table as a partition..."); - await knex.schema.raw(` - ALTER TABLE ${TableName.AuditLog} ADD CONSTRAINT audit_log_old - CHECK ( "createdAt" < DATE '${nextDateStr}' ); - - ALTER TABLE ${TableName.PartitionedAuditLog} ATTACH PARTITION ${TableName.AuditLog} - FOR VALUES FROM (MINVALUE) TO ('${nextDateStr}' ); - `); - } - - // create partition from now until end of month - console.log("Creating audit log partitions ahead of time... 
next date:", nextDateStr); - await createAuditLogPartition(knex, nextDate, new Date(nextDate.getFullYear(), nextDate.getMonth() + 1)); - - // create partitions 4 years ahead - const partitionMonths = 4 * 12; - const partitionPromises: Promise[] = []; - for (let x = 1; x <= partitionMonths; x += 1) { - partitionPromises.push( - createAuditLogPartition( - knex, - new Date(nextDate.getFullYear(), nextDate.getMonth() + x, 1), - new Date(nextDate.getFullYear(), nextDate.getMonth() + (x + 1), 1) - ) - ); - } - - await Promise.all(partitionPromises); - console.log("Partition migration complete"); - } -} - -export async function down(knex: Knex): Promise { - const partitionSearchResult = await knex.raw(` - SELECT inhrelid::regclass::text - FROM pg_inherits - WHERE inhparent::regclass::text = '${TableName.PartitionedAuditLog}' - AND inhrelid::regclass::text = '${TableName.AuditLog}' - `); - - const isAuditLogAPartition = partitionSearchResult.rows.length > 0; - if (isAuditLogAPartition) { - // detach audit log from partition - console.log("Detaching original audit log table from new partition table..."); - await knex.schema.raw(` - ALTER TABLE ${TableName.PartitionedAuditLog} DETACH PARTITION ${TableName.AuditLog}; - - ALTER TABLE ${TableName.AuditLog} DROP CONSTRAINT audit_log_old; - `); - - // revert audit log modifications - console.log("Reverting changes made to the audit log table..."); - if (await knex.schema.hasTable(TableName.AuditLog)) { - await knex.schema.alterTable(TableName.AuditLog, (t) => { - // we drop this first because adding to the partition results in a new primary key - t.dropPrimary(); - - // add back the original keys of the audit logs table - t.primary(["id"], { - constraintName: "audit_logs_pkey" - }); - }); - } - } - - await knex.schema.dropTableIfExists(TableName.PartitionedAuditLog); - console.log("Partition rollback complete"); -} diff --git a/backend/src/db/schemas/audit-logs.ts b/backend/src/db/schemas/audit-logs.ts index 
b8906698b6..d1c239724c 100644 --- a/backend/src/db/schemas/audit-logs.ts +++ b/backend/src/db/schemas/audit-logs.ts @@ -20,7 +20,8 @@ export const AuditLogsSchema = z.object({ createdAt: z.date(), updatedAt: z.date(), orgId: z.string().uuid().nullable().optional(), - projectId: z.string().nullable().optional() + projectId: z.string().nullable().optional(), + projectName: z.string().nullable().optional() }); export type TAuditLogs = z.infer<typeof AuditLogsSchema>; diff --git a/backend/src/db/schemas/index.ts b/backend/src/db/schemas/index.ts index 86ff5e612d..4fcf26c1a3 100644 --- a/backend/src/db/schemas/index.ts +++ b/backend/src/db/schemas/index.ts @@ -55,7 +55,6 @@ export * from "./org-bots"; export * from "./org-memberships"; export * from "./org-roles"; export * from "./organizations"; -export * from "./partitioned-audit-logs"; export * from "./pki-alerts"; export * from "./pki-collection-items"; export * from "./pki-collections"; diff --git a/backend/src/db/schemas/models.ts b/backend/src/db/schemas/models.ts index 4e241439ec..08f3e79cee 100644 --- a/backend/src/db/schemas/models.ts +++ b/backend/src/db/schemas/models.ts @@ -90,7 +90,6 @@ export enum TableName { OidcConfig = "oidc_configs", LdapGroupMap = "ldap_group_maps", AuditLog = "audit_logs", - PartitionedAuditLog = "partitioned_audit_logs", AuditLogStream = "audit_log_streams", GitAppInstallSession = "git_app_install_sessions", GitAppOrg = "git_app_org", diff --git a/backend/src/db/schemas/partitioned-audit-logs.ts b/backend/src/db/schemas/partitioned-audit-logs.ts deleted file mode 100644 index dd9500e7a7..0000000000 --- a/backend/src/db/schemas/partitioned-audit-logs.ts +++ /dev/null @@ -1,29 +0,0 @@ -// Code generated by automation script, DO NOT EDIT. -// Automated by pulling database and generating zod schema -// To update. Just run npm run generate:schema -// Written by akhilmhdh. 
- -import { z } from "zod"; - -import { TImmutableDBKeys } from "./models"; - -export const PartitionedAuditLogsSchema = z.object({ - id: z.string().uuid(), - actor: z.string(), - actorMetadata: z.unknown(), - ipAddress: z.string().nullable().optional(), - eventType: z.string(), - eventMetadata: z.unknown().nullable().optional(), - userAgent: z.string().nullable().optional(), - userAgentType: z.string().nullable().optional(), - expiresAt: z.date().nullable().optional(), - createdAt: z.date(), - updatedAt: z.date(), - orgId: z.string().uuid().nullable().optional(), - projectId: z.string().nullable().optional(), - projectName: z.string().nullable().optional() -}); - -export type TPartitionedAuditLogs = z.infer<typeof PartitionedAuditLogsSchema>; -export type TPartitionedAuditLogsInsert = Omit<z.input<typeof PartitionedAuditLogsSchema>, TImmutableDBKeys>; -export type TPartitionedAuditLogsUpdate = Partial<Omit<z.input<typeof PartitionedAuditLogsSchema>, TImmutableDBKeys>>; diff --git a/backend/src/ee/routes/v1/project-router.ts b/backend/src/ee/routes/v1/project-router.ts index aefca8a7bc..e3956731eb 100644 --- a/backend/src/ee/routes/v1/project-router.ts +++ b/backend/src/ee/routes/v1/project-router.ts @@ -1,6 +1,6 @@ import { z } from "zod"; -import { PartitionedAuditLogsSchema, SecretSnapshotsSchema } from "@app/db/schemas"; +import { AuditLogsSchema, SecretSnapshotsSchema } from "@app/db/schemas"; import { EventType, UserAgentType } from "@app/ee/services/audit-log/audit-log-types"; import { AUDIT_LOGS, PROJECTS } from "@app/lib/api-docs"; import { getLastMidnightDateISO, removeTrailingSlash } from "@app/lib/fn"; @@ -120,7 +120,7 @@ export const registerProjectRouter = async (server: FastifyZodProvider) => { }), response: { 200: z.object({ - auditLogs: PartitionedAuditLogsSchema.omit({ + auditLogs: AuditLogsSchema.omit({ eventMetadata: true, eventType: true, actor: true, diff --git a/backend/src/ee/services/audit-log/audit-log-dal.ts b/backend/src/ee/services/audit-log/audit-log-dal.ts index 214fc7b4f5..b2c80aa0b5 100644 --- a/backend/src/ee/services/audit-log/audit-log-dal.ts +++ 
b/backend/src/ee/services/audit-log/audit-log-dal.ts @@ -26,7 +26,7 @@ type TFindQuery = { }; export const auditLogDALFactory = (db: TDbClient) => { - const auditLogOrm = ormify(db, TableName.PartitionedAuditLog); + const auditLogOrm = ormify(db, TableName.AuditLog); const find = async ( { @@ -55,13 +55,13 @@ export const auditLogDALFactory = (db: TDbClient) => { try { // Find statements - const sqlQuery = (tx || db.replicaNode())(TableName.PartitionedAuditLog) + const sqlQuery = (tx || db.replicaNode())(TableName.AuditLog) // eslint-disable-next-line func-names .where(function () { if (orgId) { - void this.where(`${TableName.PartitionedAuditLog}.orgId`, orgId); + void this.where(`${TableName.AuditLog}.orgId`, orgId); } else if (projectId) { - void this.where(`${TableName.PartitionedAuditLog}.projectId`, projectId); + void this.where(`${TableName.AuditLog}.projectId`, projectId); } }); @@ -71,10 +71,10 @@ export const auditLogDALFactory = (db: TDbClient) => { // Select statements void sqlQuery - .select(selectAllTableCols(TableName.PartitionedAuditLog)) + .select(selectAllTableCols(TableName.AuditLog)) .limit(limit) .offset(offset) - .orderBy(`${TableName.PartitionedAuditLog}.createdAt`, "desc"); + .orderBy(`${TableName.AuditLog}.createdAt`, "desc"); // Special case: Filter by actor ID if (actorId) { @@ -100,10 +100,10 @@ export const auditLogDALFactory = (db: TDbClient) => { // Filter by date range if (startDate) { - void sqlQuery.where(`${TableName.PartitionedAuditLog}.createdAt`, ">=", startDate); + void sqlQuery.where(`${TableName.AuditLog}.createdAt`, ">=", startDate); } if (endDate) { - void sqlQuery.where(`${TableName.PartitionedAuditLog}.createdAt`, "<=", endDate); + void sqlQuery.where(`${TableName.AuditLog}.createdAt`, "<=", endDate); } // we timeout long running queries to prevent DB resource issues (2 minutes) @@ -135,13 +135,13 @@ export const auditLogDALFactory = (db: TDbClient) => { logger.info(`${QueueName.DailyResourceCleanUp}: audit log started`); 
do { try { - const findExpiredLogSubQuery = (tx || db)(TableName.PartitionedAuditLog) + const findExpiredLogSubQuery = (tx || db)(TableName.AuditLog) .where("expiresAt", "<", today) .select("id") .limit(AUDIT_LOG_PRUNE_BATCH_SIZE); // eslint-disable-next-line no-await-in-loop - deletedAuditLogIds = await (tx || db)(TableName.PartitionedAuditLog) + deletedAuditLogIds = await (tx || db)(TableName.AuditLog) .whereIn("id", findExpiredLogSubQuery) .del() .returning("id"); diff --git a/backend/src/server/routes/v1/integration-router.ts b/backend/src/server/routes/v1/integration-router.ts index 2a09da5262..86d3218524 100644 --- a/backend/src/server/routes/v1/integration-router.ts +++ b/backend/src/server/routes/v1/integration-router.ts @@ -52,7 +52,13 @@ export const registerIntegrationRouter = async (server: FastifyZodProvider) => { }), response: { 200: z.object({ - integration: IntegrationsSchema + integration: IntegrationsSchema.extend({ + environment: z.object({ + slug: z.string().trim(), + name: z.string().trim(), + id: z.string().trim() + }) + }) }) } }, @@ -138,7 +144,13 @@ export const registerIntegrationRouter = async (server: FastifyZodProvider) => { }), response: { 200: z.object({ - integration: IntegrationsSchema + integration: IntegrationsSchema.extend({ + environment: z.object({ + slug: z.string().trim(), + name: z.string().trim(), + id: z.string().trim() + }) + }) }) } }, diff --git a/backend/src/server/routes/v1/organization-router.ts b/backend/src/server/routes/v1/organization-router.ts index 68e1864684..d19317c4be 100644 --- a/backend/src/server/routes/v1/organization-router.ts +++ b/backend/src/server/routes/v1/organization-router.ts @@ -1,18 +1,16 @@ import { z } from "zod"; import { + AuditLogsSchema, GroupsSchema, IncidentContactsSchema, OrganizationsSchema, OrgMembershipsSchema, OrgRolesSchema, - PartitionedAuditLogsSchema, UsersSchema } from "@app/db/schemas"; import { EventType, UserAgentType } from "@app/ee/services/audit-log/audit-log-types"; 
import { AUDIT_LOGS, ORGANIZATIONS } from "@app/lib/api-docs"; -import { getConfig } from "@app/lib/config/env"; -import { BadRequestError } from "@app/lib/errors"; import { getLastMidnightDateISO } from "@app/lib/fn"; import { readLimit, writeLimit } from "@app/server/config/rateLimiter"; import { verifyAuth } from "@app/server/plugins/auth/verify-auth"; @@ -117,7 +115,7 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => { response: { 200: z.object({ - auditLogs: PartitionedAuditLogsSchema.omit({ + auditLogs: AuditLogsSchema.omit({ eventMetadata: true, eventType: true, actor: true, @@ -141,11 +139,6 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => { }, onRequest: verifyAuth([AuthMode.JWT]), handler: async (req) => { - const appCfg = getConfig(); - if (appCfg.isCloud) { - throw new BadRequestError({ message: "Infisical cloud audit log is in maintenance mode." }); - } - const auditLogs = await server.services.auditLog.listAuditLogs({ filter: { ...req.query, diff --git a/backend/src/services/integration/integration-service.ts b/backend/src/services/integration/integration-service.ts index 4f4b26e247..24e1dd5c49 100644 --- a/backend/src/services/integration/integration-service.ts +++ b/backend/src/services/integration/integration-service.ts @@ -120,7 +120,13 @@ export const integrationServiceFactory = ({ secretPath, projectId: integrationAuth.projectId }); - return { integration, integrationAuth }; + return { + integration: { + ...integration, + environment: folder.environment + }, + integrationAuth + }; }; const updateIntegration = async ({ @@ -183,7 +189,10 @@ export const integrationServiceFactory = ({ projectId: folder.projectId }); - return updatedIntegration; + return { + ...updatedIntegration, + environment: folder.environment + }; }; const getIntegration = async ({ id, actor, actorAuthMethod, actorId, actorOrgId }: TGetIntegrationDTO) => { diff --git a/docs/documentation/platform/admin-panel/org-admin-console.mdx 
b/docs/documentation/platform/admin-panel/org-admin-console.mdx new file mode 100644 index 0000000000..e1290fa6b8 --- /dev/null +++ b/docs/documentation/platform/admin-panel/org-admin-console.mdx @@ -0,0 +1,32 @@ +--- +title: "Organization Admin Console" +description: "Manage your Infisical organization from our organization admin console." +--- + +The Organization Admin Console provides a user-friendly interface for Infisical organization admins to manage organization-related configurations. + +## Accessing the Organization Admin Console + +Only organization admins have access to the Organization Admin Console. + +![Access Organization Admin Panel](/images/platform/admin-panels/access-org-admin-console.png) + +1. Click on the profile icon in the left sidebar. +2. From the dropdown menu, select `Organization Admin Console`. + +## Projects Section + +![Projects Section](/images/platform/admin-panels/org-admin-console-projects.png) + +The Projects Section lists all projects created within your organization, including those you do not have membership in. You can easily search for a project by name using the search bar. + +### Accessing a Project in Your Organization + +If you want to access a project in which you are not a member but are an organization admin, follow these steps: + +![Access project](/images/platform/admin-panels/org-admin-console-access.png) + +1. Click on the three-dot icon next to the project you wish to access. +2. Click on the **Access** button. + +This will grant you admin permissions for the selected project and generate an audit log of your access, ensuring transparency regarding admin privileges. diff --git a/docs/documentation/platform/admin-panel/overview.mdx b/docs/documentation/platform/admin-panel/overview.mdx new file mode 100644 index 0000000000..0505d6b2a8 --- /dev/null +++ b/docs/documentation/platform/admin-panel/overview.mdx @@ -0,0 +1,25 @@ +--- +description: "Learn about Infisical's Admin Panel." 
+--- + +The Infisical Admin Panel allows you to configure and manage various resources within your organization and server. + + + + Configure and manage your server settings effectively. + + + + Manage settings specific to your organization. + + diff --git a/docs/documentation/platform/admin-panel/server-admin.mdx b/docs/documentation/platform/admin-panel/server-admin.mdx new file mode 100644 index 0000000000..5b71109eef --- /dev/null +++ b/docs/documentation/platform/admin-panel/server-admin.mdx @@ -0,0 +1,70 @@ +--- +title: "Server Admin Panel" +description: "Manage your Infisical server from the Server Admin Panel." +--- + +The Server Admin Panel provides a user interface for Infisical server administrators to configure various parameters as needed. This includes configuring rate limits, managing allowed signups, and more. + +## Accessing the Server Admin Panel + +The first user who created the account in Infisical is designated as the server administrator. You can access the admin panel by navigating as follows: + +![Access Server Admin Panel](/images/platform/admin-panels/access-server-admin-panel.png) + +1. Click on the profile icon in the left sidebar. +2. From the dropdown menu, select `Server Admin Panel`. + +## General Section + +![General Settings](/images/platform/admin-panels/admin-panel-general.png) + +### Allow User Signups + +This setting controls whether users can sign up for your Infisical instance. The options are: + +1. **Anyone**: Any user with access to your instance can sign up. +2. **Disabled**: No one will be able to sign up. + +### Restrict Signup Domain + +This setting allows only users with specific email domains (such as your organization's domain) to sign up. + +### Default Organization + +Use this setting if you want all users accessing your Infisical instance to log in through your configured SAML/LDAP provider. 
This prevents users from manually entering their organization slug during authentication and redirects them to the SAML/LDAP authentication page. + +### Trust Emails + +By default, Infisical does not trust emails logged in via SAML/LDAP/OIDC due to the potential for email spoofing. Users must verify their email addresses before proceeding. You can disable this validation if you are running an Infisical instance within your organization and trust incoming emails from your members. + +## Authentication Section + +![Authentication Settings](/images/platform/admin-panels/admin-panel-auths.png) + +This section allows you to configure various login and signup methods for your instance. + +## Rate Limit Section + +![Rate Limit Settings](/images/platform/admin-panels/admin-panel-rate-limits.png) + +Configure the rate limits for your Infisical instance across various endpoints. You do not need to redeploy when making changes to rate limits; they will be automatically synchronized to all instances. + + + Note that rate limit configuration is a paid feature. Please contact sales@infisical.com to purchase a license for its use. + + +## User Management Section + +![User Management](/images/platform/admin-panels/admin-panel-users.png) + +The User Management section lists all users who have signed up for your instance. You can search for users using the search bar. + +To delete a user from Infisical: + +1. Search for the user. +2. Click the cross button next to the user. +3. Confirm the warning popup. + + + Note that user management configuration is a paid feature. Please contact sales@infisical.com to purchase a license for its use. 
+ diff --git a/docs/images/platform/admin-panels/access-org-admin-console.png b/docs/images/platform/admin-panels/access-org-admin-console.png new file mode 100644 index 0000000000..fc4b3dcc15 Binary files /dev/null and b/docs/images/platform/admin-panels/access-org-admin-console.png differ diff --git a/docs/images/platform/admin-panels/access-server-admin-panel.png b/docs/images/platform/admin-panels/access-server-admin-panel.png new file mode 100644 index 0000000000..65402a1767 Binary files /dev/null and b/docs/images/platform/admin-panels/access-server-admin-panel.png differ diff --git a/docs/images/platform/admin-panels/admin-panel-auths.png b/docs/images/platform/admin-panels/admin-panel-auths.png new file mode 100644 index 0000000000..a0abd5d9a6 Binary files /dev/null and b/docs/images/platform/admin-panels/admin-panel-auths.png differ diff --git a/docs/images/platform/admin-panels/admin-panel-general.png b/docs/images/platform/admin-panels/admin-panel-general.png new file mode 100644 index 0000000000..bce175cf0d Binary files /dev/null and b/docs/images/platform/admin-panels/admin-panel-general.png differ diff --git a/docs/images/platform/admin-panels/admin-panel-integration.png b/docs/images/platform/admin-panels/admin-panel-integration.png new file mode 100644 index 0000000000..43bedd17ec Binary files /dev/null and b/docs/images/platform/admin-panels/admin-panel-integration.png differ diff --git a/docs/images/platform/admin-panels/admin-panel-rate-limits.png b/docs/images/platform/admin-panels/admin-panel-rate-limits.png new file mode 100644 index 0000000000..d8f689f1ea Binary files /dev/null and b/docs/images/platform/admin-panels/admin-panel-rate-limits.png differ diff --git a/docs/images/platform/admin-panels/admin-panel-users.png b/docs/images/platform/admin-panels/admin-panel-users.png new file mode 100644 index 0000000000..94add6d857 Binary files /dev/null and b/docs/images/platform/admin-panels/admin-panel-users.png differ diff --git 
a/docs/images/platform/admin-panels/org-admin-console-access.png b/docs/images/platform/admin-panels/org-admin-console-access.png new file mode 100644 index 0000000000..6aba5b21aa Binary files /dev/null and b/docs/images/platform/admin-panels/org-admin-console-access.png differ diff --git a/docs/images/platform/admin-panels/org-admin-console-projects.png b/docs/images/platform/admin-panels/org-admin-console-projects.png new file mode 100644 index 0000000000..13b8bcfce9 Binary files /dev/null and b/docs/images/platform/admin-panels/org-admin-console-projects.png differ diff --git a/docs/mint.json b/docs/mint.json index eb29ae12e6..b7cd7abb83 100644 --- a/docs/mint.json +++ b/docs/mint.json @@ -187,6 +187,14 @@ "documentation/platform/workflow-integrations/slack-integration" ] }, + { + "group": "Admin Panel", + "pages": [ + "documentation/platform/admin-panel/overview", + "documentation/platform/admin-panel/server-admin", + "documentation/platform/admin-panel/org-admin-console" + ] + }, "documentation/platform/secret-sharing" ] }, diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 5227088b62..47745b1259 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -1,5 +1,5 @@ { - "name": "relock-npm-lock-v2-SvMQeF", + "name": "frontend", "lockfileVersion": 3, "requires": true, "packages": { @@ -40,7 +40,7 @@ "@radix-ui/react-toast": "^1.1.5", "@radix-ui/react-tooltip": "^1.0.7", "@reduxjs/toolkit": "^1.8.3", - "@sindresorhus/slugify": "^2.2.1", + "@sindresorhus/slugify": "1.1.0", "@stripe/react-stripe-js": "^1.16.3", "@stripe/stripe-js": "^1.46.0", "@tanstack/react-query": "^4.23.0", @@ -5943,54 +5943,44 @@ "dev": true }, "node_modules/@sindresorhus/slugify": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/@sindresorhus/slugify/-/slugify-2.2.1.tgz", - "integrity": "sha512-MkngSCRZ8JdSOCHRaYd+D01XhvU3Hjy6MGl06zhOk614hp9EOAp5gIkBeQg7wtmxpitU6eAL4kdiRMcJa2dlrw==", + "version": "1.1.0", + "resolved": 
"https://registry.npmjs.org/@sindresorhus/slugify/-/slugify-1.1.0.tgz", + "integrity": "sha512-ujZRbmmizX26yS/HnB3P9QNlNa4+UvHh+rIse3RbOXLp8yl6n1TxB4t7NHggtVgS8QmmOtzXo48kCxZGACpkPw==", + "license": "MIT", "dependencies": { - "@sindresorhus/transliterate": "^1.0.0", - "escape-string-regexp": "^5.0.0" + "@sindresorhus/transliterate": "^0.1.1", + "escape-string-regexp": "^4.0.0" }, "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@sindresorhus/slugify/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", - "engines": { - "node": ">=12" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/@sindresorhus/transliterate": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@sindresorhus/transliterate/-/transliterate-1.6.0.tgz", - "integrity": "sha512-doH1gimEu3A46VX6aVxpHTeHrytJAG6HgdxntYnCFiIFHEM/ZGpG8KiZGBChchjQmG0XFIBL552kBTjVcMZXwQ==", + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@sindresorhus/transliterate/-/transliterate-0.1.2.tgz", + "integrity": "sha512-5/kmIOY9FF32nicXH+5yLNTX4NJ4atl7jRgqAJuIn/iyDFXBktOKDxCvyGE/EzmF4ngSUvjXxQUQlQiZ5lfw+w==", + "license": "MIT", "dependencies": { - "escape-string-regexp": "^5.0.0" + "escape-string-regexp": "^2.0.0", + "lodash.deburr": "^4.1.0" }, "engines": { - "node": ">=12" + "node": ">=10" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/@sindresorhus/transliterate/node_modules/escape-string-regexp": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", - "integrity": 
"sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz", + "integrity": "sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==", + "license": "MIT", "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=8" } }, "node_modules/@storybook/addon-actions": { @@ -17278,6 +17268,12 @@ "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", "dev": true }, + "node_modules/lodash.deburr": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/lodash.deburr/-/lodash.deburr-4.1.0.tgz", + "integrity": "sha512-m/M1U1f3ddMCs6Hq2tAsYThTBDaAKFDX3dwDo97GEYzamXi9SqUpjWi/Rrj/gf3X2n8ktwgZrlP1z6E3v/IExQ==", + "license": "MIT" + }, "node_modules/lodash.isplainobject": { "version": "4.0.6", "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz", diff --git a/frontend/package.json b/frontend/package.json index 42467ced44..95ce228412 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -12,6 +12,11 @@ "storybook": "storybook dev -p 6006 -s ./public", "build-storybook": "storybook build" }, + "overrides": { + "@storybook/nextjs": { + "sharp": "npm:dry-uninstall" + } + }, "dependencies": { "@casl/ability": "^6.5.0", "@casl/react": "^3.1.0", @@ -48,7 +53,7 @@ "@radix-ui/react-toast": "^1.1.5", "@radix-ui/react-tooltip": "^1.0.7", "@reduxjs/toolkit": "^1.8.3", - "@sindresorhus/slugify": "^2.2.1", + "@sindresorhus/slugify": "1.1.0", "@stripe/react-stripe-js": "^1.16.3", "@stripe/stripe-js": "^1.46.0", "@tanstack/react-query": "^4.23.0", diff --git a/frontend/src/hooks/api/auditLogs/types.tsx b/frontend/src/hooks/api/auditLogs/types.tsx index 76eb585177..567e1d0b20 100644 --- 
a/frontend/src/hooks/api/auditLogs/types.tsx +++ b/frontend/src/hooks/api/auditLogs/types.tsx @@ -887,4 +887,5 @@ export type AuditLog = { createdAt: string; updatedAt: string; projectName?: string; + projectId?: string; }; diff --git a/frontend/src/views/Org/AuditLogsPage/AuditLogsPage.tsx b/frontend/src/views/Org/AuditLogsPage/AuditLogsPage.tsx index 3fef393403..2b6ec67447 100644 --- a/frontend/src/views/Org/AuditLogsPage/AuditLogsPage.tsx +++ b/frontend/src/views/Org/AuditLogsPage/AuditLogsPage.tsx @@ -1,4 +1,3 @@ -import { NoticeBanner } from "@app/components/v2"; import { OrgPermissionActions, OrgPermissionSubjects } from "@app/context"; import { withPermission } from "@app/hoc"; @@ -11,15 +10,9 @@ export const AuditLogsPage = withPermission(

Audit Logs

- {(window.location.origin.includes("https://app.infisical.com") || - window.location.origin.includes("https://gamma.infisical.com")) && ( - - We are currently working on improving the performance of audit log queries. During this time, querying logs is temporarily disabled. However, audit logs are still being generated as usual, so there is no disruption to log collection. - - )}
- {!window.location.origin.includes("https://app.infisical.com") && } +
); diff --git a/frontend/src/views/Org/AuditLogsPage/components/LogsTableRow.tsx b/frontend/src/views/Org/AuditLogsPage/components/LogsTableRow.tsx index e4620a477e..2e8e018719 100644 --- a/frontend/src/views/Org/AuditLogsPage/components/LogsTableRow.tsx +++ b/frontend/src/views/Org/AuditLogsPage/components/LogsTableRow.tsx @@ -573,7 +573,7 @@ export const LogsTableRow = ({ auditLog, isOrgAuditLogs, showActorColumn }: Prop {formatDate(auditLog.createdAt)} {`${eventToNameMap[auditLog.event.type]}`} - {isOrgAuditLogs && {auditLog?.projectName ?? "N/A"}} + {isOrgAuditLogs && {auditLog?.projectName ?? auditLog?.projectId ?? "N/A"}} {showActorColumn && renderActor(auditLog.actor)} {renderSource()} {renderMetadata(auditLog.event)} diff --git a/frontend/src/views/SecretOverviewPage/SecretOverviewPage.tsx b/frontend/src/views/SecretOverviewPage/SecretOverviewPage.tsx index 50347adaa2..f46b7d9291 100644 --- a/frontend/src/views/SecretOverviewPage/SecretOverviewPage.tsx +++ b/frontend/src/views/SecretOverviewPage/SecretOverviewPage.tsx @@ -805,7 +805,7 @@ export const SecretOverviewPage = () => { resetSelectedEntries={resetSelectedEntries} />
- + @@ -1003,24 +1003,24 @@ export const SecretOverviewPage = () => {
- {!isOverviewLoading && totalCount > 0 && ( - - } - className="border-t border-solid border-t-mineshaft-600" - count={totalCount} - page={page} - perPage={perPage} - onChangePage={(newPage) => setPage(newPage)} - onChangePerPage={(newPerPage) => setPerPage(newPerPage)} - /> - )}
+ {!isOverviewLoading && totalCount > 0 && ( + + } + className="rounded-b-md border-t border-solid border-t-mineshaft-600" + count={totalCount} + page={page} + perPage={perPage} + onChangePage={(newPage) => setPage(newPage)} + onChangePerPage={(newPerPage) => setPerPage(newPerPage)} + /> + )}