chore: clean up duplicate files created by macOS sync

Details:
- Removed 352 duplicate files with numeric suffixes
- Updated .gitignore to keep such files from being committed again (see the illustrative patterns just below)
- The duplicates were created by iCloud or other sync-service conflicts
- No functional impact on the project; this is purely a cleanup of redundant files
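
The .gitignore change itself is not part of the excerpt shown here. As an illustration only, patterns that block macOS/iCloud sync-conflict copies (duplicates named like "file 2.ts") could look like the following; the real entries in this commit may differ:

    # Hypothetical .gitignore entries for sync-conflict duplicates (actual patterns not shown in this diff)
    * 2.*
    * 3.*
    *.icloud
    .DS_Store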
Author: Yep_Q
Date: 2025-09-08 12:06:01 +08:00
Parent: 1564396449
Commit: d6f48d6d14
365 changed files with 2039 additions and 68301 deletions


@@ -1,19 +0,0 @@
import { defineConfig } from 'eslint/config';
import { baseConfig } from '@n8n/eslint-config/base';
export default defineConfig(baseConfig, {
rules: {
'@typescript-eslint/naming-convention': [
'error',
// Add exception for Docker Compose labels
{
selector: 'objectLiteralProperty',
format: null, // Allow any format
filter: {
regex: '^com\\.docker\\.',
match: true,
},
},
],
},
});
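
For context, this is the kind of object literal property the exception above is meant to allow; a minimal sketch (the label keys match the container setup code later in this commit, while the values here are illustrative):

// Values are illustrative; the keys mirror the labels used by the setup helpers below.
// The default naming-convention formats would flag these dotted, lowercase keys,
// which is why the filter exempts anything matching ^com\.docker\.
const labels = {
  'com.docker.compose.project': 'n8n-stack-demo',
  'com.docker.compose.service': 'redis',
};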


@@ -1,12 +0,0 @@
/**
* n8n Test Containers
*
* This package provides container management utilities for n8n testing.
*/
export { createN8NStack } from './n8n-test-container-creation';
export type { N8NConfig, N8NStack } from './n8n-test-container-creation';
export * from './performance-plans';
export { ContainerTestHelpers } from './n8n-test-container-helpers';
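
A minimal usage sketch of this public API, assuming createN8NStack accepts an N8NConfig object and resolves to an N8NStack; the option names below (postgres, mains, workers) are guesses based on the CLI flags in the package.json further down and may not match the real interface:

// Hypothetical usage; the config fields and import specifier are assumptions.
import { createN8NStack } from 'n8n-containers';
import type { N8NStack } from 'n8n-containers';

async function run(): Promise<void> {
  // Assumed N8NConfig shape: queue mode backed by Postgres, two mains, one worker.
  const stack: N8NStack = await createN8NStack({ postgres: true, mains: 2, workers: 1 });
  // ... exercise the stack in tests, then clean up with whatever teardown the API provides ...
}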


@@ -1,321 +0,0 @@
import { PostgreSqlContainer } from '@testcontainers/postgresql';
import { RedisContainer } from '@testcontainers/redis';
import { setTimeout as wait } from 'node:timers/promises';
import type { StartedNetwork, StartedTestContainer } from 'testcontainers';
import { GenericContainer, Wait } from 'testcontainers';
import { createSilentLogConsumer } from './n8n-test-container-utils';
export async function setupRedis({
redisImage,
projectName,
network,
}: {
redisImage: string;
projectName: string;
network: StartedNetwork;
}): Promise<StartedTestContainer> {
return await new RedisContainer(redisImage)
.withNetwork(network)
.withNetworkAliases('redis')
.withLabels({
'com.docker.compose.project': projectName,
'com.docker.compose.service': 'redis',
})
.withName(`${projectName}-redis`)
.withReuse()
.start();
}
export async function setupPostgres({
postgresImage,
projectName,
network,
}: {
postgresImage: string;
projectName: string;
network: StartedNetwork;
}): Promise<{
container: StartedTestContainer;
database: string;
username: string;
password: string;
}> {
const postgres = await new PostgreSqlContainer(postgresImage)
.withNetwork(network)
.withNetworkAliases('postgres')
.withDatabase('n8n_db')
.withUsername('n8n_user')
.withPassword('test_password')
.withStartupTimeout(30000)
.withLabels({
'com.docker.compose.project': projectName,
'com.docker.compose.service': 'postgres',
})
.withName(`${projectName}-postgres`)
.withReuse()
.start();
return {
container: postgres,
database: postgres.getDatabase(),
username: postgres.getUsername(),
password: postgres.getPassword(),
};
}
/**
 * Set up NGINX as a load balancer for multi-main n8n instances
 * @param nginxImage The Docker image for NGINX.
 * @param projectName Project name used for container naming and labeling.
 * @param mainCount Number of n8n main instances to balance across.
 * @param network The shared Docker network.
 * @param port The host port to expose for NGINX.
 * @returns A promise that resolves to the started NGINX container.
 */
export async function setupNginxLoadBalancer({
nginxImage,
projectName,
mainCount,
network,
port,
}: {
nginxImage: string;
projectName: string;
mainCount: number;
network: StartedNetwork;
port: number;
}): Promise<StartedTestContainer> {
// Generate upstream server entries from the list of main instances.
const upstreamServers = Array.from(
{ length: mainCount },
(_, index) => ` server ${projectName}-n8n-main-${index + 1}:5678;`,
).join('\n');
// Build the NGINX configuration with dynamic upstream servers.
// This allows us to have the port allocation be dynamic.
const nginxConfig = buildNginxConfig(upstreamServers);
const { consumer, throwWithLogs } = createSilentLogConsumer();
try {
return await new GenericContainer(nginxImage)
.withNetwork(network)
.withExposedPorts({ container: 80, host: port })
.withCopyContentToContainer([{ content: nginxConfig, target: '/etc/nginx/nginx.conf' }])
.withWaitStrategy(Wait.forListeningPorts())
.withLabels({
'com.docker.compose.project': projectName,
'com.docker.compose.service': 'nginx-lb',
})
.withName(`${projectName}-nginx-lb`)
.withReuse()
.withLogConsumer(consumer)
.start();
} catch (error) {
return throwWithLogs(error);
}
}
/**
* Builds NGINX configuration for load balancing n8n instances
* @param upstreamServers The upstream server entries to include in the configuration
* @returns The complete NGINX configuration as a string
*/
function buildNginxConfig(upstreamServers: string): string {
return `
events {
worker_connections 1024;
}
http {
client_max_body_size 50M;
access_log off;
error_log /dev/stderr warn;
# Map for WebSocket upgrades
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
upstream backend {
# Use ip_hash for sticky sessions
ip_hash;
${upstreamServers}
keepalive 32;
}
server {
listen 80;
# Set longer timeouts for slow operations
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
location / {
proxy_pass http://backend;
# Forward standard proxy headers
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Forward WebSocket headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_http_version 1.1;
proxy_buffering off;
}
# Specific location for real-time push/websockets
location /rest/push {
proxy_pass http://backend;
# Forward standard proxy headers
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# Configure WebSocket proxying
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_http_version 1.1;
# Disable buffering for real-time data
proxy_buffering off;
# Set very long timeouts for persistent connections
proxy_read_timeout 3600s;
proxy_send_timeout 3600s;
}
}
}`;
}
/**
* Builds Caddy configuration for load balancing n8n instances
* @param upstreamServers Array of upstream server addresses
* @returns The complete Caddyfile configuration as a string
*/
function buildCaddyConfig(upstreamServers: string[]): string {
const backends = upstreamServers.join(' ');
return `
:80 {
# Reverse proxy with load balancing
reverse_proxy ${backends} {
# Enable sticky sessions using cookie
lb_policy cookie
# Health check (optional)
health_uri /healthz
health_interval 10s
# Timeouts
transport http {
dial_timeout 60s
read_timeout 60s
write_timeout 60s
}
}
# Set max request body size
request_body {
max_size 50MB
}
}`;
}
/**
 * Set up Caddy as a load balancer for multi-main n8n instances
* @param caddyImage The Docker image for Caddy
* @param projectName Project name for container naming
* @param mainCount Number of main instances
* @param network The shared Docker network
* @returns A promise that resolves to the started Caddy container
*/
export async function setupCaddyLoadBalancer({
caddyImage = 'caddy:2-alpine',
projectName,
mainCount,
network,
}: {
caddyImage?: string;
projectName: string;
mainCount: number;
network: StartedNetwork;
}): Promise<StartedTestContainer> {
// Generate upstream server addresses
const upstreamServers = Array.from(
{ length: mainCount },
(_, index) => `${projectName}-n8n-main-${index + 1}:5678`,
);
// Build the Caddy configuration
const caddyConfig = buildCaddyConfig(upstreamServers);
const { consumer, throwWithLogs } = createSilentLogConsumer();
try {
return await new GenericContainer(caddyImage)
.withNetwork(network)
.withExposedPorts(80)
.withCopyContentToContainer([{ content: caddyConfig, target: '/etc/caddy/Caddyfile' }])
.withWaitStrategy(Wait.forListeningPorts())
.withLabels({
'com.docker.compose.project': projectName,
'com.docker.compose.service': 'caddy-lb',
})
.withName(`${projectName}-caddy-lb`)
.withReuse()
.withLogConsumer(consumer)
.start();
} catch (error) {
return throwWithLogs(error);
}
}
/**
* Polls a container's HTTP endpoint until it returns a 200 status.
* Logs a warning if the endpoint does not return 200 within the specified timeout.
*
* @param container The started container.
* @param endpoint The HTTP health check endpoint (e.g., '/healthz/readiness').
* @param timeoutMs Total timeout in milliseconds (default: 60,000ms).
*/
export async function pollContainerHttpEndpoint(
container: StartedTestContainer,
endpoint: string,
timeoutMs: number = 60000,
): Promise<void> {
const startTime = Date.now();
const url = `http://${container.getHost()}:${container.getFirstMappedPort()}${endpoint}`;
const retryIntervalMs = 1000;
while (Date.now() - startTime < timeoutMs) {
try {
const response = await fetch(url);
if (response.status === 200) {
return;
}
} catch (error) {
// Don't log errors, just retry
}
await wait(retryIntervalMs);
}
console.error(
`WARNING: HTTP endpoint at ${url} did not return 200 within ${
timeoutMs / 1000
} seconds. Proceeding with caution.`,
);
}
// TODO: Look at Ollama container?
// TODO: Look at MariaDB container?
// TODO: Look at MockServer container, could we use this for mocking out external services?
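
A brief sketch of how the helpers above might be wired together on a shared network, using the Network helper from testcontainers; the image tags, project name, and relative import path are assumptions (file names are not shown in this excerpt):

// Hypothetical wiring of the setup helpers; import path and image tags are assumed.
import { Network } from 'testcontainers';
import { setupRedis, setupPostgres } from './n8n-test-container-backend'; // path assumed

async function startBackendServices() {
  const network = await new Network().start();
  const projectName = 'n8n-stack-demo'; // illustrative project name

  const redis = await setupRedis({ redisImage: 'redis:7-alpine', projectName, network });
  const postgres = await setupPostgres({ postgresImage: 'postgres:16-alpine', projectName, network });

  // Once an n8n main container is started against these services, its readiness
  // endpoint could be polled before tests run, e.g.:
  // await pollContainerHttpEndpoint(mainContainer, '/healthz/readiness');

  return { network, redis, postgres };
}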


@@ -1,26 +0,0 @@
import type { Readable } from 'stream';
/**
* Create a log consumer that does not log to the console
 * @returns An object containing the log consumer and a throwWithLogs function that prints the buffered logs and rethrows the error
*/
export function createSilentLogConsumer() {
const logs: string[] = [];
const consumer = (stream: Readable) => {
stream.on('data', (chunk: Buffer | string) => {
logs.push(chunk.toString().trim());
});
};
const throwWithLogs = (error: unknown): never => {
if (logs.length > 0) {
console.error('\n--- Container Logs ---');
console.error(logs.join('\n'));
console.error('---------------------\n');
}
throw error;
};
return { consumer, throwWithLogs };
}
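
For reference, a small sketch of how this consumer is typically attached, mirroring the pattern already used by the load balancer setup above; the image name is illustrative:

// Illustrative usage of createSilentLogConsumer; the image tag is an assumption.
import { GenericContainer } from 'testcontainers';
import { createSilentLogConsumer } from './n8n-test-container-utils';

async function startQuietly() {
  const { consumer, throwWithLogs } = createSilentLogConsumer();
  try {
    return await new GenericContainer('nginx:alpine').withLogConsumer(consumer).start();
  } catch (error) {
    // Dump the buffered container logs only when startup fails, then rethrow.
    return throwWithLogs(error);
  }
}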


@@ -1,30 +0,0 @@
{
"name": "n8n-containers",
"private": true,
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"stack": "tsx ./n8n-start-stack.ts",
"stack:help": "tsx ./n8n-start-stack.ts --help",
"stack:sqlite": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack",
"stack:postgres": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --postgres",
"stack:queue": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --queue",
"stack:multi-main": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --mains 2 --workers 1",
"stack:starter": "TESTCONTAINERS_REUSE_ENABLE=true npm run stack -- --plan starter",
"stack:clean:containers": "docker ps -aq --filter 'name=n8n-stack-*' | xargs -r docker rm -f 2>/dev/null",
"stack:clean:networks": "docker network ls --filter 'label=org.testcontainers=true' -q | xargs -r docker network rm 2>/dev/null",
"stack:clean:all": "pnpm run stack:clean:containers && pnpm run stack:clean:networks",
"lint": "eslint . --quiet",
"lint:fix": "eslint . --fix"
},
"keywords": [],
"author": "",
"license": "ISC",
"devDependencies": {
"@testcontainers/postgresql": "^11.0.3",
"@testcontainers/redis": "^11.0.3",
"get-port": "^7.1.0",
"testcontainers": "^11.0.3"
}
}