Automatic optimisation for ingest chunking based on processed messages
commit 1cc2ef629e
parent 1aeadaf3b7
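In short: after every processing pass the ingest loop now compares how many messages were actually ingested against two thresholds and nudges its chunk size up or down. With the defaults in the code (chunk size 900; grow by 100 when fewer than 2500 messages were ingested, shrink by 100 when more than 10000 were, as long as the result stays within the 100 to 1000000 bounds), a pass that ingests 1800 messages raises the next chunk to 1000, while a pass that ingests 12000 trims it to 800. The new MONOLITH_INGEST_* environment variables exposed in the compose file override these defaults. The compose changes also drop the Manticore db service, comment out the bridge/external networks in favour of network_mode: host, bind-mount /code/run into /var/run for the app and redis containers, give ssdb a named data volume, and add CPU/memory limits to the app, ssdb and redis services.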
@@ -2,17 +2,14 @@ version: "2.2"
 services:
   app:
-    image: pathogen/monolith:latest
+    image: xf/monolith:latest
     container_name: monolith
     build: .
     volumes:
       - ${PORTAINER_GIT_DIR}:/code
-      # env_file:
-      #   - stack.env
-    networks:
-      - default
-      - xf
-      - db
+      - type: bind
+        source: /code/run
+        target: /var/run
     environment:
       PORTAINER_GIT_DIR: "${PORTAINER_GIT_DIR}"
       MODULES_ENABLED: "${MODULES_ENABLED}"
@@ -49,41 +46,22 @@ services:
       MONOLITH_PROCESS_PERFSTATS: "${MONOLITH_PROCESS_PERFSTATS}"
       MONOLITH_CH4_BOARDS: "${MONOLITH_CH4_BOARDS}"
       REDIS_PASSWORD: "${REDIS_PASSWORD}"
+      MONOLITH_INGEST_INCREASE_BELOW: "${MONOLITH_INGEST_INCREASE_BELOW}"
+      MONOLITH_INGEST_INCREASE_BY: "${MONOLITH_INGEST_INCREASE_BY}"
+      MONOLITH_INGEST_DECREASE_ABOVE: "${MONOLITH_INGEST_DECREASE_ABOVE}"
+      MONOLITH_INGEST_DECREASE_BY: "${MONOLITH_INGEST_DECREASE_BY}"
+      MONOLITH_INGEST_MAX: "${MONOLITH_INGEST_MAX}"
+      MONOLITH_INGEST_MIN: "${MONOLITH_INGEST_MIN}"
+    deploy:
+      resources:
+        limits:
+          cpus: '0.5'
+          memory: 1.0G
+    network_mode: host
-  db:
-    #image: pathogen/manticore:kibana
-    image: manticoresearch/manticore:dev
-    container_name: monolith_db
-    #build:
-    #  context: ./docker/manticore
-    #  args:
-    #    DEV: 1
-    restart: always
-    ports:
-      - 9308
-      - 9312
-      - 9306
-    ulimits:
-      nproc: 65535
-      nofile:
-        soft: 65535
-        hard: 65535
-      memlock:
-        soft: -1
-        hard: -1
-    environment:
-      # - MCL=1
-      - EXTRA=1
-    networks:
-      - default
-      - xf
-      - db
-    volumes:
-      - ./docker/data:/var/lib/manticore
-      # - ./docker/manticore.conf:/etc/manticoresearch/manticore.conf
 
   # threshold:
-  #   image: pathogen/threshold:latest
+  #   image: xf/threshold:latest
   #   container_name: threshold
   #   build: legacy/docker
   #   volumes:
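The new MONOLITH_INGEST_* variables above are passed straight through from the deployment environment (for example Portainer stack variables or an env file; the exact mechanism is outside this diff and is only an assumption here). As a rough illustration, the values below simply restate the defaults baked into the ingest code:

# illustrative only – these are the defaults from the getenv() calls in the ingest module
MONOLITH_INGEST_CHUNK_SIZE=900
MONOLITH_INGEST_INCREASE_BELOW=2500
MONOLITH_INGEST_INCREASE_BY=100
MONOLITH_INGEST_DECREASE_ABOVE=10000
MONOLITH_INGEST_DECREASE_BY=100
MONOLITH_INGEST_MAX=1000000
MONOLITH_INGEST_MIN=100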
@@ -148,16 +126,17 @@ services:
       - "1289:1289"
     environment:
       - SSDB_PORT=1289
-    networks:
-      - default
-      - db
-
-  # tmp:
-  #   image: busybox
-  #   container_name: tmp_monolith
-  #   command: chmod -R 777 /var/run/socks
-  #   volumes:
-  #     - /var/run/socks
+    volumes:
+      - monolith_ssdb_data:/var/lib/ssdb
+    # networks:
+    #   - default
+    #   - db
+    deploy:
+      resources:
+        limits:
+          cpus: '0.5'
+          memory: 1.0G
+    network_mode: host
 
   redis:
     image: redis
@@ -171,6 +150,9 @@ services:
     volumes:
       - ${PORTAINER_GIT_DIR}/docker/redis.conf:/etc/redis.conf
      - monolith_redis_data:/data
+      - type: bind
+        source: /code/run
+        target: /var/run
     # volumes_from:
     #   - tmp
     healthcheck:
@@ -178,18 +160,25 @@ services:
       interval: 2s
       timeout: 2s
       retries: 15
-    networks:
-      - default
-      - xf
-      - db
+    # networks:
+    #   - default
+    #   - xf
+    #   - db
+    deploy:
+      resources:
+        limits:
+          cpus: '0.5'
+          memory: 1.0G
+    network_mode: host
 
-networks:
-  default:
-    driver: bridge
-  xf:
-    external: true
-  db:
-    external: true
+# networks:
+#   default:
+#     driver: bridge
+#   xf:
+#     external: true
+#   db:
+#     external: true
 
 volumes:
   monolith_redis_data:
+  monolith_ssdb_data:
@@ -22,3 +22,4 @@ orjson
 uvloop
 elasticsearch[async]
 msgpack
+# flpc
@@ -11,8 +11,15 @@ KEYNAME = "queue"
 CHUNK_SIZE = int(getenv("MONOLITH_INGEST_CHUNK_SIZE", "900"))
 ITER_DELAY = float(getenv("MONOLITH_INGEST_ITER_DELAY", "0.5"))
 
+INGEST_INCREASE_BELOW = int(getenv("MONOLITH_INGEST_INCREASE_BELOW", "2500"))
+INGEST_DECREASE_ABOVE = int(getenv("MONOLITH_INGEST_DECREASE_ABOVE", "10000"))
+
+INGEST_INCREASE_BY = int(getenv("MONOLITH_INGEST_INCREASE_BY", "100"))
+INGEST_DECREASE_BY = int(getenv("MONOLITH_INGEST_DECREASE_BY", "100"))
 log = util.get_logger("ingest")
 
+INGEST_MAX = int(getenv("MONOLITH_INGEST_MAX", "1000000"))
+INGEST_MIN = int(getenv("MONOLITH_INGEST_MIN", "100"))
 
 class Ingest(object):
     def __init__(self):
@@ -34,9 +41,36 @@ class Ingest(object):
             await asyncio.sleep(ITER_DELAY)
 
     async def get_chunk(self):
+        global CHUNK_SIZE
         length = await db.ar.llen(KEYNAME)
         if length > CHUNK_SIZE:
             length = CHUNK_SIZE
        if not length:
             return
-        await process.spawn_processing_threads(self.current_chunk, length)
+        ingested = await process.spawn_processing_threads(self.current_chunk, length)
+
+        if ingested < INGEST_INCREASE_BELOW:
+            if CHUNK_SIZE + INGEST_INCREASE_BY < INGEST_MAX:
+                self.log.info(
+                    (
+                        f"Increasing chunk size to "
+                        f"{CHUNK_SIZE + INGEST_INCREASE_BY} "
+                        f"due to low ingestion ({ingested})"
+                    )
+                )
+                CHUNK_SIZE += INGEST_INCREASE_BY
+            else:
+                log.info(f"Chunk size ({CHUNK_SIZE}) at maximum, not increasing above: {INGEST_MAX}")
+
+        elif ingested > INGEST_DECREASE_ABOVE:
+            if CHUNK_SIZE - INGEST_DECREASE_BY > INGEST_MIN:
+                self.log.info(
+                    (
+                        f"Decreasing chunk size to "
+                        f"{CHUNK_SIZE - INGEST_DECREASE_BY} "
+                        f"due to high ingestion ({ingested})"
+                    )
+                )
+                CHUNK_SIZE -= INGEST_DECREASE_BY
+            else:
+                log.info(f"Chunk size ({CHUNK_SIZE}) at minimum, not decreasing below: {INGEST_MIN}")
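Read on its own, the rule added to get_chunk() is a simple dead band around the per-pass ingest count: grow the chunk when a pass is quiet, shrink it when a pass is very busy, and leave it alone in between or at the configured bounds. A minimal standalone sketch of that rule follows; the function name next_chunk_size and the pure-function shape are illustrative only (the real code mutates the module-level CHUNK_SIZE in place), and the constants just mirror the getenv() defaults above.

# Illustrative restatement of the chunk-size rule; names and defaults are
# assumptions taken from the diff, not part of the repository.
INGEST_INCREASE_BELOW = 2500
INGEST_DECREASE_ABOVE = 10000
INGEST_INCREASE_BY = 100
INGEST_DECREASE_BY = 100
INGEST_MAX = 1_000_000
INGEST_MIN = 100


def next_chunk_size(chunk_size: int, ingested: int) -> int:
    """Return the chunk size for the next pass, given how many messages
    the last pass actually ingested."""
    if ingested < INGEST_INCREASE_BELOW:
        # Quiet pass: widen the chunk unless that would exceed the maximum.
        if chunk_size + INGEST_INCREASE_BY < INGEST_MAX:
            return chunk_size + INGEST_INCREASE_BY
    elif ingested > INGEST_DECREASE_ABOVE:
        # Busy pass: shrink the chunk unless that would fall below the minimum.
        if chunk_size - INGEST_DECREASE_BY > INGEST_MIN:
            return chunk_size - INGEST_DECREASE_BY
    # Within the dead band, or already at a bound: keep the current size.
    return chunk_size


if __name__ == "__main__":
    assert next_chunk_size(900, 1800) == 1000   # quiet pass, grow
    assert next_chunk_size(900, 12000) == 800   # busy pass, shrink
    assert next_chunk_size(900, 5000) == 900    # in the dead band, unchanged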