Expand Aurask k3s production overlay and release pipeline
All checks were successful
aurask-release / build-and-deploy (push) Successful in 3m7s

This commit is contained in:
Aaron 2026-04-19 18:05:46 +08:00
parent d7a836a041
commit 1ae23d44c1
26 changed files with 704 additions and 168 deletions

View File

@ -7,9 +7,12 @@ on:
paths:
- .gitea/workflows/aurask-release.yml
- api/**
- protal/**
- manager/**
- deploy/images/aurask-api/**
- deploy/images/aurask-web/**
- deploy/k3s/base/**
- deploy/images/aurask-manager/**
- deploy/k3s/**
- deploy/k3s/README.md
- tests/**
- pyproject.toml
@ -27,6 +30,7 @@ env:
DEPLOY_HOST: 64.90.15.15
DEPLOY_USER: root
AURASK_NAMESPACE: aurask
KUSTOMIZE_PATH: /tmp/aurask-release/overlays/production
jobs:
build-and-deploy:
@ -67,13 +71,21 @@ jobs:
- name: Build and push aurask-web image
run: |
web_image="${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/aurask-web"
docker build -t "${web_image}:${GITHUB_SHA}" -t "${web_image}:latest" deploy/images/aurask-web
docker build -t "${web_image}:${GITHUB_SHA}" -t "${web_image}:latest" -f deploy/images/aurask-web/Dockerfile .
docker push "${web_image}:${GITHUB_SHA}"
docker push "${web_image}:latest"
- name: Deploy aurask base manifests
- name: Build and push aurask-manager image
run: |
scp -i ~/.ssh/id_ed25519 -r deploy/k3s/base "${DEPLOY_USER}@${DEPLOY_HOST}:/tmp/aurask-release"
manager_image="${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/aurask-manager"
docker build -t "${manager_image}:${GITHUB_SHA}" -t "${manager_image}:latest" -f deploy/images/aurask-manager/Dockerfile .
docker push "${manager_image}:${GITHUB_SHA}"
docker push "${manager_image}:latest"
- name: Deploy aurask production overlay
run: |
ssh -i ~/.ssh/id_ed25519 "${DEPLOY_USER}@${DEPLOY_HOST}" "rm -rf /tmp/aurask-release"
scp -i ~/.ssh/id_ed25519 -r deploy/k3s "${DEPLOY_USER}@${DEPLOY_HOST}:/tmp/aurask-release"
ssh -i ~/.ssh/id_ed25519 "${DEPLOY_USER}@${DEPLOY_HOST}" "
set -euo pipefail
kubectl create namespace ${AURASK_NAMESPACE} --dry-run=client -o yaml | kubectl apply -f -
@ -82,10 +94,21 @@ jobs:
--docker-username='${{ secrets.REGISTRY_USER }}' \
--docker-password='${{ secrets.REGISTRY_PASSWORD }}' \
--dry-run=client -o yaml | kubectl apply -f -
kubectl apply -k /tmp/aurask-release
kubectl -n ${AURASK_NAMESPACE} create secret generic aurask-postgres \
--from-literal=POSTGRES_DB='${{ secrets.POSTGRES_DB }}' \
--from-literal=POSTGRES_USER='${{ secrets.POSTGRES_USER }}' \
--from-literal=POSTGRES_PASSWORD='${{ secrets.POSTGRES_PASSWORD }}' \
--dry-run=client -o yaml | kubectl apply -f -
kubectl apply -k ${KUSTOMIZE_PATH}
kubectl -n ${AURASK_NAMESPACE} set image deployment/aurask-api api=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/aurask-api:${GITHUB_SHA}
kubectl -n ${AURASK_NAMESPACE} set image deployment/aurask-worker worker=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/aurask-api:${GITHUB_SHA}
kubectl -n ${AURASK_NAMESPACE} set image deployment/aurask-web web=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/aurask-web:${GITHUB_SHA}
kubectl -n ${AURASK_NAMESPACE} set image deployment/aurask-manager manager=${REGISTRY_HOST}/${REGISTRY_NAMESPACE}/aurask-manager:${GITHUB_SHA}
kubectl -n ${AURASK_NAMESPACE} rollout status deployment/aurask-api --timeout=600s
kubectl -n ${AURASK_NAMESPACE} rollout status deployment/aurask-worker --timeout=600s
kubectl -n ${AURASK_NAMESPACE} rollout status deployment/aurask-web --timeout=600s
kubectl -n ${AURASK_NAMESPACE} rollout status deployment/aurask-manager --timeout=600s
kubectl -n ${AURASK_NAMESPACE} rollout status statefulset/postgres --timeout=600s
kubectl -n ${AURASK_NAMESPACE} rollout status statefulset/redis --timeout=600s
kubectl -n ${AURASK_NAMESPACE} get pods -o wide
"

View File

@ -4,13 +4,65 @@ from __future__ import annotations
import argparse
import json
import signal
import time
from pathlib import Path
from aurask.api import run_server
from aurask.app import create_app
from aurask.app import AuraskApp, create_app
DEFAULT_DATA_PATH = Path(".aurask/state.json")
DEFAULT_WORKER_HEARTBEAT_SECONDS = 30
def worker_self_check(app: AuraskApp, data_path: str | None) -> dict[str, object]:
    """Build the startup self-check payload printed by the worker.

    Counts the seeded workflow templates and plans in the app's store so the
    worker's first log line proves state was loaded successfully.
    """
    templates = app.store.list("workflow_templates")
    plans = app.store.list("plans")
    return {
        "message": "Aurask worker self-check passed",
        "data_path": data_path,
        "workflow_templates": len(templates),
        "plans": len(plans),
    }
def run_worker(
    data_path: str | None,
    *,
    reset: bool = False,
    once: bool = False,
    heartbeat_seconds: int = DEFAULT_WORKER_HEARTBEAT_SECONDS,
) -> None:
    """Run the minimal Aurask worker process.

    Prints a startup self-check as JSON, then emits one heartbeat JSON line
    every ``heartbeat_seconds`` until SIGINT/SIGTERM (or Ctrl-C) stops it.

    Args:
        data_path: JSON state file forwarded to ``create_app``.
        reset: Reset local MVP state before starting.
        once: Run only the startup self-check, then return (used by the
            Kubernetes startup probe).
        heartbeat_seconds: Interval between heartbeat log lines.
    """
    app = create_app(data_path, reset=reset)
    print(json.dumps(worker_self_check(app, data_path), ensure_ascii=False, indent=2))
    if once:
        return

    keep_running = True

    def _stop_worker(signum: int, _frame) -> None:
        nonlocal keep_running
        keep_running = False
        print(f"Aurask worker received signal {signum}, shutting down")

    # getattr guards keep this importable on platforms lacking either signal.
    for sig in (getattr(signal, "SIGINT", None), getattr(signal, "SIGTERM", None)):
        if sig is not None:
            signal.signal(sig, _stop_worker)

    try:
        while keep_running:
            print(
                json.dumps(
                    {
                        "message": "Aurask worker heartbeat",
                        "data_path": data_path,
                        "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
                    },
                    ensure_ascii=False,
                )
            )
            # Sleep in <=1s slices so SIGTERM is honoured promptly. The k3s
            # deployment grants terminationGracePeriodSeconds=10 while the
            # default heartbeat is 30s; a single long time.sleep() would let
            # kubelet SIGKILL the pod before the loop re-checks keep_running.
            deadline = time.monotonic() + max(1, heartbeat_seconds)
            while keep_running and time.monotonic() < deadline:
                time.sleep(min(1.0, deadline - time.monotonic()))
    except KeyboardInterrupt:
        print("Aurask worker stopped")
def main() -> None:
@ -24,6 +76,12 @@ def main() -> None:
serve_parser.add_argument("--port", type=int, default=8080)
serve_parser.add_argument("--reset", action="store_true", help="Reset local MVP state before serving")
worker_parser = subparsers.add_parser("worker", help="Run the Aurask worker process")
worker_parser.add_argument("--data", default=str(DEFAULT_DATA_PATH), help="JSON state file for MVP persistence")
worker_parser.add_argument("--reset", action="store_true", help="Reset local MVP state before starting the worker")
worker_parser.add_argument("--once", action="store_true", help="Run a startup self-check and exit")
worker_parser.add_argument("--heartbeat-seconds", type=int, default=DEFAULT_WORKER_HEARTBEAT_SECONDS)
demo_parser = subparsers.add_parser("demo", help="Bootstrap a tenant and run a safe template workflow")
demo_parser.add_argument("--data", default=str(DEFAULT_DATA_PATH), help="JSON state file for MVP persistence")
demo_parser.add_argument("--reset", action="store_true", help="Reset local MVP state before running demo")
@ -36,6 +94,15 @@ def main() -> None:
run_server(app, host=args.host, port=args.port)
return
if command == "worker":
run_worker(
args.data,
reset=args.reset,
once=args.once,
heartbeat_seconds=max(1, args.heartbeat_seconds),
)
return
if command == "demo":
app = create_app(args.data, reset=args.reset)
bootstrap = app.bootstrap_demo()

View File

@ -0,0 +1,3 @@
# Static admin frontend image: copy the pre-built manager/ assets into
# Caddy's default web root (the caddy:2-alpine base serves /usr/share/caddy).
FROM caddy:2-alpine
COPY manager /usr/share/caddy

View File

@ -1,3 +1,3 @@
FROM caddy:2-alpine
COPY index.html /usr/share/caddy/index.html
COPY protal /usr/share/caddy

View File

@ -1,138 +0,0 @@
<!doctype html>
<html lang="zh-CN">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Aurask</title>
<style>
:root {
color-scheme: dark;
font-family: Inter, "Segoe UI", sans-serif;
background: #0b1020;
color: #e5e7eb;
}
body {
margin: 0;
min-height: 100vh;
display: grid;
place-items: center;
background: radial-gradient(circle at top, #1f3a8a 0, #0b1020 45%);
}
main {
width: min(920px, calc(100vw - 32px));
padding: 40px;
border-radius: 24px;
background: rgba(15, 23, 42, 0.86);
box-shadow: 0 30px 80px rgba(15, 23, 42, 0.45);
border: 1px solid rgba(148, 163, 184, 0.18);
}
h1 {
margin: 0 0 12px;
font-size: 44px;
}
p {
margin: 0 0 16px;
line-height: 1.7;
color: #cbd5e1;
}
.grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
gap: 16px;
margin: 28px 0;
}
.card {
padding: 18px;
border-radius: 18px;
background: rgba(30, 41, 59, 0.9);
border: 1px solid rgba(148, 163, 184, 0.14);
}
.label {
font-size: 12px;
letter-spacing: 0.08em;
text-transform: uppercase;
color: #93c5fd;
margin-bottom: 10px;
}
code {
color: #fde68a;
word-break: break-all;
}
a {
color: #93c5fd;
text-decoration: none;
}
a:hover {
text-decoration: underline;
}
.status {
display: inline-flex;
align-items: center;
gap: 10px;
padding: 10px 14px;
border-radius: 999px;
background: rgba(30, 41, 59, 0.9);
border: 1px solid rgba(148, 163, 184, 0.18);
margin-top: 8px;
}
.dot {
width: 10px;
height: 10px;
border-radius: 999px;
background: #f59e0b;
box-shadow: 0 0 12px rgba(245, 158, 11, 0.65);
}
.dot.ok {
background: #22c55e;
box-shadow: 0 0 12px rgba(34, 197, 94, 0.65);
}
</style>
</head>
<body>
<main>
<h1>Aurask</h1>
<p>当前站点提供 Aurask MVP 网关入口与部署说明页。生产可用接口位于同域名下的 <code>/api</code> 前缀。</p>
<div class="status">
<span class="dot" id="health-dot"></span>
<span id="health-text">正在检查 API 健康状态...</span>
</div>
<div class="grid">
<section class="card">
<div class="label">Health</div>
<div><a href="/api/health" target="_blank" rel="noreferrer">/api/health</a></div>
</section>
<section class="card">
<div class="label">Plans</div>
<div><a href="/api/plans" target="_blank" rel="noreferrer">/api/plans</a></div>
</section>
<section class="card">
<div class="label">Bootstrap</div>
<div><code>POST /api/demo/bootstrap</code></div>
</section>
<section class="card">
<div class="label">Source</div>
<div><a href="https://git.mydevcloud.love/devcloud-admin/aurask.git" target="_blank" rel="noreferrer">aurask.git</a></div>
</section>
</div>
<p>鉴权接口使用 <code>Authorization: Bearer &lt;api_key&gt;</code>。当前前台页面为轻量入口页,后端能力直接来自 Aurask `master` 分支的 MVP 网关。</p>
</main>
<script>
async function checkHealth() {
const dot = document.getElementById("health-dot");
const text = document.getElementById("health-text");
try {
const response = await fetch("/api/health", { cache: "no-store" });
if (!response.ok) {
throw new Error("HTTP " + response.status);
}
const payload = await response.json();
dot.classList.add("ok");
text.textContent = "API 正常: " + (payload.service || "aurask");
} catch (error) {
text.textContent = "API 检查失败: " + error.message;
}
}
checkHealth();
</script>
</body>
</html>

View File

@ -2,7 +2,7 @@
## 当前 DevCloud 落地方案
当前 `master` 分支已经对接 DevCloud 的实际部署形态,先以 **Aurask MVP 网关 + 轻量前端入口页** 方式稳定上线,再逐步扩展为文档中规划的完整生产架构
当前 `master` 分支已经对接 DevCloud 的实际部署形态。仓库采用 **base + production overlay** 的混合结构:`deploy/k3s/base` 保持可复用默认清单,`deploy/k3s/overlays/production` 绑定当前 3 节点 DevCloud 集群、NodePort 和节点调度策略
### 当前已落地组件
@ -13,9 +13,34 @@
- Service:`NodePort 30091`
- `aurask-web`
- 镜像:`registry.mydevcloud.love/devcloud/aurask-web`
- 来源:`protal/`
- 节点:`154.193.250.23`
- Kubernetes 节点名:`devcloud-trade-agent-2`
- Service:`NodePort 30090`
- `aurask-worker`
- 镜像:复用 `registry.mydevcloud.love/devcloud/aurask-api`
- 节点:`45.113.2.55`
- Kubernetes 节点名:`devcloud-trade-agent-1`
- 启动命令:`python -m aurask worker --data /data/state.json`
- `aurask-manager`
- 镜像:`registry.mydevcloud.love/devcloud/aurask-manager`
- 来源:`manager/`
- 节点:`154.193.250.23`
- Kubernetes 节点名:`devcloud-trade-agent-2`
- Service:`NodePort 30092`
- 当前不接入公网 Caddy,仅作为管理员入口预留
- `postgres`
- 镜像:`pgvector/pgvector:pg16`
- 节点:`45.113.2.55`
- Kubernetes 节点名:`devcloud-trade-agent-1`
- Service:仅集群内访问
- 用途:预置 PostgreSQL + PGVector 数据面,当前业务主存储仍为 MVP JSON
- `redis`
- 镜像:`redis:7-alpine`
- 节点:`45.113.2.55`
- Kubernetes 节点名:`devcloud-trade-agent-1`
- Service:仅集群内访问
- 用途:预置队列、幂等、缓存、限流数据面
- `aurask` 命名空间
- `aurask-api-state` PVC
- `StorageClass`: `local-path`
@ -30,18 +55,16 @@
- 边界入口通过前端节点宿主机 `Caddy` 转发:
- `154.193.250.23:443` -> `NodePort 30090 / 30091`
### 当前未纳入首版自动部署的组件
### 当前未纳入自动部署的组件
以下内容仍属于后续扩展计划,**当前 `master` 分支自动发布不会部署**
- PostgreSQL
- `PGVector` / 独立向量数据库
- Redis
- `aurask-worker`
- Langflow Runtime
- AnythingLLM
- Observability / Longhorn / CNPG
当前 production overlay 中 `AURASK_USE_EXTERNAL_BRIDGES=false`。也就是说PostgreSQL / PGVector / Redis 已作为生产数据面清单落地,但 Aurask 业务代码仍运行在 MVP JSON 主存储模式;待真实 PostgreSQL repository、Redis 队列消费者、Langflow 与 AnythingLLM 工作负载完成后,再切换到外部桥接模式。
### 自动发布流水线
仓库内置 Gitea Actions 工作流:
@ -52,16 +75,20 @@
- 运行单元测试
- 构建 `aurask-api` 镜像
- 构建 `aurask-web` 镜像
- 构建 `aurask-manager` 镜像
- 推送镜像到私有仓库
- 通过 SSH 连接 `64.90.15.15`
- `kubectl apply -k deploy/k3s/base`
- 自动更新 `aurask-api``aurask-web` 镜像到当前 commit SHA
- `kubectl apply -k deploy/k3s/overlays/production`
- 自动更新 `aurask-api`、`aurask-worker`、`aurask-web` 与 `aurask-manager` 镜像到当前 commit SHA
### 仓库所需 Gitea Actions Secrets
- `SSH_PRIVATE_KEY`
- `REGISTRY_USER`
- `REGISTRY_PASSWORD`
- `POSTGRES_DB`
- `POSTGRES_USER`
- `POSTGRES_PASSWORD`
### 当前仓库内的部署资产位置
@ -72,14 +99,27 @@ deploy/
Dockerfile
aurask-web/
Dockerfile
index.html
aurask-manager/
Dockerfile
k3s/
base/
aurask-runtime-config.yaml
namespace.yaml
aurask-api-pvc.yaml
aurask-api.yaml
aurask-worker.yaml
aurask-web.yaml
aurask-manager.yaml
postgres.yaml
redis.yaml
kustomization.yaml
overlays/
production/
kustomization.yaml
examples/
aurask-runtime-secrets.example.yaml
aurask-postgres-secret.example.yaml
aurask-redis-secret.example.yaml
```
## 目标扩展方案300 名月度活跃用户)
@ -440,17 +480,17 @@ Aurask key 契约来源:
| 镜像 | 来源目录 | 用途 |
| --- | --- | --- |
| `aurask-api` | `api/` | 后端 API 与 worker 运行时 |
| `aurask-protal` | `protal/` | 用户前端静态站点 |
| `aurask-web` | `protal/` | 用户前端静态站点 |
| `aurask-manager` | `manager/` | 管理员前端静态站点 |
首版可先使用同一个 Python 镜像启动 `aurask-api``aurask-worker`
```text
aurask-api: python -m aurask serve --host 0.0.0.0 --port 8080
aurask-worker: python -m aurask worker # 后续补真实 worker 命令
aurask-worker: python -m aurask worker # 当前提供最小常驻 worker 与 --once 自检
```
在真实 worker 命令完成前,`aurask-worker` 可先部署为保留工作负载或运行后台 cron/队列消费者占位
当前 `aurask-worker` 提供最小常驻进程、心跳日志与 `--once` 自检;真实 Redis 队列消费者完成前,它作为生产拓扑预留工作负载
## 9. 环境变量
@ -588,7 +628,7 @@ deploy/k3s/
secrets.example.yaml
aurask-api.yaml
aurask-worker.yaml
aurask-protal.yaml
aurask-web.yaml
aurask-manager.yaml
langflow-runtime.yaml
anythingllm.yaml
@ -639,11 +679,11 @@ Kustomize 管理:
### Phase 3Aurask 应用层
1. 构建并推送 `aurask-api` 镜像。
2. 构建并推送 `aurask-protal` 镜像。
2. 构建并推送 `aurask-web` 镜像。
3. 构建并推送 `aurask-manager` 镜像。
4. 部署 `aurask-api`
5. 部署 `aurask-worker`
6. 部署 `aurask-protal` 与 `aurask-manager`
6. 部署 `aurask-web` 与 `aurask-manager`
7. 配置 Ingress、TLS、NetworkPolicy。
### Phase 4Runtime 层

View File

@ -15,12 +15,27 @@ spec:
spec:
imagePullSecrets:
- name: devcloud-registry
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-1
containers:
- name: api
image: registry.mydevcloud.love/devcloud/aurask-api:latest
imagePullPolicy: Always
command:
- python
- -m
- aurask
- serve
- --data
- /data/state.json
- --host
- 0.0.0.0
- --port
- "8080"
envFrom:
- configMapRef:
name: aurask-runtime-config
- secretRef:
name: aurask-runtime-secrets
optional: true
ports:
- containerPort: 8080
name: http
@ -57,11 +72,9 @@ metadata:
name: aurask-api
namespace: aurask
spec:
type: NodePort
selector:
app: aurask-api
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30091

View File

@ -0,0 +1,56 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aurask-manager
namespace: aurask
spec:
replicas: 1
selector:
matchLabels:
app: aurask-manager
template:
metadata:
labels:
app: aurask-manager
spec:
imagePullSecrets:
- name: devcloud-registry
containers:
- name: manager
image: registry.mydevcloud.love/devcloud/aurask-manager:latest
imagePullPolicy: Always
ports:
- containerPort: 80
name: http
readinessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /
port: http
initialDelaySeconds: 15
periodSeconds: 20
resources:
requests:
cpu: 50m
memory: 64Mi
limits:
cpu: 200m
memory: 256Mi
---
apiVersion: v1
kind: Service
metadata:
name: aurask-manager
namespace: aurask
spec:
selector:
app: aurask-manager
ports:
- name: http
port: 80
targetPort: 80

View File

@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: aurask-runtime-config
namespace: aurask
data:
AURASK_USE_EXTERNAL_BRIDGES: "false"
AURASK_POSTGRES_MIN_CONNECTIONS: "1"
AURASK_POSTGRES_MAX_CONNECTIONS: "10"
AURASK_PGVECTOR_TABLE: aurask_vectors
AURASK_PGVECTOR_DIMENSION: "1536"
AURASK_REDIS_WORKFLOW_QUEUE: "aurask:workflow-runs"
AURASK_REDIS_LOCK_PREFIX: "aurask:lock:"
AURASK_REDIS_CACHE_PREFIX: "aurask:cache:"

View File

@ -15,8 +15,6 @@ spec:
spec:
imagePullSecrets:
- name: devcloud-registry
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-2
containers:
- name: web
image: registry.mydevcloud.love/devcloud/aurask-web:latest
@ -50,11 +48,9 @@ metadata:
name: aurask-web
namespace: aurask
spec:
type: NodePort
selector:
app: aurask-web
ports:
- name: http
port: 80
targetPort: 80
nodePort: 30090

View File

@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aurask-worker
namespace: aurask
spec:
replicas: 1
selector:
matchLabels:
app: aurask-worker
template:
metadata:
labels:
app: aurask-worker
spec:
imagePullSecrets:
- name: devcloud-registry
terminationGracePeriodSeconds: 10
containers:
- name: worker
image: registry.mydevcloud.love/devcloud/aurask-api:latest
imagePullPolicy: Always
command:
- python
- -m
- aurask
- worker
- --data
- /data/state.json
envFrom:
- configMapRef:
name: aurask-runtime-config
- secretRef:
name: aurask-runtime-secrets
optional: true
volumeMounts:
- name: aurask-api-state
mountPath: /data
startupProbe:
exec:
command:
- python
- -m
- aurask
- worker
- --data
- /data/state.json
- --once
initialDelaySeconds: 5
periodSeconds: 10
failureThreshold: 12
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: "1"
memory: 1Gi
volumes:
- name: aurask-api-state
persistentVolumeClaim:
claimName: aurask-api-state

View File

@ -2,6 +2,11 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- namespace.yaml
- aurask-runtime-config.yaml
- aurask-api-pvc.yaml
- aurask-api.yaml
- aurask-worker.yaml
- aurask-web.yaml
- aurask-manager.yaml
- postgres.yaml
- redis.yaml

View File

@ -0,0 +1,106 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: postgres-init
namespace: aurask
data:
10-enable-vector.sql: |
CREATE EXTENSION IF NOT EXISTS vector;
---
apiVersion: v1
kind: Service
metadata:
name: postgres
namespace: aurask
spec:
clusterIP: None
selector:
app: postgres
ports:
- name: postgres
port: 5432
targetPort: 5432
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
namespace: aurask
spec:
serviceName: postgres
replicas: 1
selector:
matchLabels:
app: postgres
template:
metadata:
labels:
app: postgres
spec:
containers:
- name: postgres
image: pgvector/pgvector:pg16
imagePullPolicy: IfNotPresent
ports:
- containerPort: 5432
name: postgres
env:
- name: POSTGRES_DB
valueFrom:
secretKeyRef:
name: aurask-postgres
key: POSTGRES_DB
- name: POSTGRES_USER
valueFrom:
secretKeyRef:
name: aurask-postgres
key: POSTGRES_USER
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: aurask-postgres
key: POSTGRES_PASSWORD
- name: PGDATA
value: /var/lib/postgresql/data/pgdata
volumeMounts:
- name: postgres-data
mountPath: /var/lib/postgresql/data
- name: postgres-init
mountPath: /docker-entrypoint-initdb.d/10-enable-vector.sql
subPath: 10-enable-vector.sql
readinessProbe:
exec:
command:
- sh
- -c
- pg_isready -U "$POSTGRES_USER" -d "$POSTGRES_DB"
initialDelaySeconds: 10
periodSeconds: 10
livenessProbe:
exec:
command:
- sh
- -c
- pg_isready -U "$POSTGRES_USER" -d "$POSTGRES_DB"
initialDelaySeconds: 20
periodSeconds: 20
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: "2"
memory: 4Gi
volumes:
- name: postgres-init
configMap:
name: postgres-init
volumeClaimTemplates:
- metadata:
name: postgres-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi

View File

@ -0,0 +1,96 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: redis-config
namespace: aurask
data:
redis.conf: |
bind 0.0.0.0
protected-mode yes
appendonly yes
dir /data
save 900 1
save 300 10
save 60 10000
maxmemory-policy noeviction
---
apiVersion: v1
kind: Service
metadata:
name: redis
namespace: aurask
spec:
clusterIP: None
selector:
app: redis
ports:
- name: redis
port: 6379
targetPort: 6379
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis
namespace: aurask
spec:
serviceName: redis
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7-alpine
imagePullPolicy: IfNotPresent
command:
- redis-server
- /etc/redis/redis.conf
ports:
- containerPort: 6379
name: redis
volumeMounts:
- name: redis-config
mountPath: /etc/redis/redis.conf
subPath: redis.conf
- name: redis-data
mountPath: /data
readinessProbe:
exec:
command:
- redis-cli
- ping
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
exec:
command:
- redis-cli
- ping
initialDelaySeconds: 20
periodSeconds: 20
resources:
requests:
cpu: 250m
memory: 256Mi
limits:
cpu: "1"
memory: 1Gi
volumes:
- name: redis-config
configMap:
name: redis-config
volumeClaimTemplates:
- metadata:
name: redis-data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi

View File

@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: aurask-postgres
namespace: aurask
type: Opaque
stringData:
POSTGRES_DB: aurask
POSTGRES_USER: aurask
POSTGRES_PASSWORD: REPLACE_ME

View File

@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
name: aurask-redis
namespace: aurask
type: Opaque
stringData:
REDIS_PASSWORD: REPLACE_ME

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Secret
metadata:
name: aurask-runtime-secrets
namespace: aurask
type: Opaque
stringData:
AURASK_USE_EXTERNAL_BRIDGES: "true"
AURASK_DATABASE_URL: postgresql://aurask:REPLACE_ME@postgres.aurask.svc.cluster.local:5432/aurask
AURASK_REDIS_URL: redis://redis.aurask.svc.cluster.local:6379/0
AURASK_REDIS_WORKFLOW_QUEUE: aurask:workflow-runs
AURASK_ANYTHINGLLM_BASE_URL: http://anythingllm.aurask-runtime.svc.cluster.local:3001
AURASK_ANYTHINGLLM_API_KEY: REPLACE_ME
AURASK_LANGFLOW_BASE_URL: http://langflow-runtime.aurask-runtime.svc.cluster.local:7860
AURASK_LANGFLOW_API_KEY: REPLACE_ME

View File

@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aurask-api
namespace: aurask
spec:
template:
spec:
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-1
---
apiVersion: v1
kind: Service
metadata:
name: aurask-api
namespace: aurask
spec:
type: NodePort
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30091

View File

@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aurask-manager
namespace: aurask
spec:
template:
spec:
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-2
---
apiVersion: v1
kind: Service
metadata:
name: aurask-manager
namespace: aurask
spec:
type: NodePort
ports:
- name: http
port: 80
targetPort: 80
nodePort: 30092

View File

@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aurask-web
namespace: aurask
spec:
template:
spec:
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-2
---
apiVersion: v1
kind: Service
metadata:
name: aurask-web
namespace: aurask
spec:
type: NodePort
ports:
- name: http
port: 80
targetPort: 80
nodePort: 30090

View File

@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: aurask-worker
namespace: aurask
spec:
template:
spec:
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-1

View File

@ -0,0 +1,12 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ../../base
patches:
- path: aurask-api-production.yaml
- path: aurask-worker-production.yaml
- path: aurask-web-production.yaml
- path: aurask-manager-production.yaml
- path: postgres-production.yaml
- path: redis-production.yaml
- path: runtime-config-production.yaml

View File

@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgres
namespace: aurask
spec:
template:
spec:
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-1

View File

@ -0,0 +1,10 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: redis
namespace: aurask
spec:
template:
spec:
nodeSelector:
kubernetes.io/hostname: devcloud-trade-agent-1

View File

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: aurask-runtime-config
namespace: aurask
data:
AURASK_USE_EXTERNAL_BRIDGES: "false"

42
tests/test_cli.py Normal file
View File

@ -0,0 +1,42 @@
from __future__ import annotations
import io
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from unittest import mock
from aurask.cli import DEFAULT_DATA_PATH, main
class AuraskCLITests(unittest.TestCase):
    """End-to-end tests driving ``aurask.cli.main`` through ``sys.argv``."""

    def test_worker_once_self_check_exits_cleanly(self) -> None:
        # `worker --once` must print the self-check JSON and return without
        # entering the heartbeat loop (this is what the k8s startup probe runs).
        with tempfile.TemporaryDirectory() as temp_dir:
            data_path = Path(temp_dir) / "state.json"
            stdout = io.StringIO()
            argv = ["aurask", "worker", "--data", str(data_path), "--once"]
            with mock.patch.object(sys, "argv", argv), mock.patch("sys.stdout", stdout):
                main()
            payload = json.loads(stdout.getvalue())
            self.assertEqual(payload["message"], "Aurask worker self-check passed")
            self.assertEqual(payload["data_path"], str(data_path))
            # The self-check reports store counts; a fresh state must expose
            # at least one workflow template and one plan.
            self.assertGreaterEqual(payload["workflow_templates"], 1)
            self.assertGreaterEqual(payload["plans"], 1)

    def test_demo_remains_default_command(self) -> None:
        # Invoking `aurask` with no subcommand still runs the demo flow.
        with tempfile.TemporaryDirectory() as temp_dir:
            current_dir = Path.cwd()
            stdout = io.StringIO()
            try:
                # chdir so DEFAULT_DATA_PATH (.aurask/state.json) is created
                # inside the temporary directory, not the repo checkout.
                os.chdir(temp_dir)
                with mock.patch.object(sys, "argv", ["aurask"]), mock.patch("sys.stdout", stdout):
                    main()
            finally:
                os.chdir(current_dir)
            payload = json.loads(stdout.getvalue())
            self.assertEqual(payload["message"], "Aurask MVP demo completed")
            self.assertEqual(payload["workspace_id"][:3], "ws_")
            self.assertEqual(DEFAULT_DATA_PATH.parts, (".aurask", "state.json"))