Improve memory usage and fix 4chan crawler
@@ -14,7 +14,6 @@ from concurrent.futures import ProcessPoolExecutor
 
 # For timestamp processing
 from datetime import datetime
-from math import ceil
 from os import getenv
 
 import orjson
@@ -35,7 +34,6 @@ from gensim.parsing.preprocessing import ( # stem_text,
     strip_short,
     strip_tags,
 )
-from numpy import array_split
 from polyglot.detect.base import logger as polyglot_logger
 
 # For NLP
@@ -54,6 +52,8 @@ from schemas.ch4_s import ATTRMAP
 
 trues = ("true", "1", "t", True)
 
+KEYNAME = "queue"
+
 MONOLITH_PROCESS_PERFSTATS = (
     getenv("MONOLITH_PROCESS_PERFSTATS", "false").lower() in trues
 )
@@ -106,20 +106,23 @@ hash_key = get_hash_key()
 
 
-@asyncio.coroutine
-async def spawn_processing_threads(data):
-    len_data = len(data)
-
+async def spawn_processing_threads(chunk, length):
+    log.debug(f"Spawning processing threads for chunk {chunk} of length {length}")
     loop = asyncio.get_event_loop()
     tasks = []
 
-    if len(data) < CPU_THREADS * 100:
-        split_data = [data]
+    if length < CPU_THREADS * 100:
+        cores = 1
+        chunk_size = length
     else:
-        msg_per_core = int(len(data) / CPU_THREADS)
-        split_data = array_split(data, ceil(len(data) / msg_per_core))
-    for index, split in enumerate(split_data):
-        log.debug(f"Delegating processing of {len(split)} messages to thread {index}")
-        task = loop.run_in_executor(p, process_data, split)
+        cores = CPU_THREADS
+        chunk_size = int(length / cores)
+
+    for index in range(cores):
+        log.debug(
+            f"[{chunk}/{index}] Delegating {chunk_size} messages to thread {index}"
+        )
+        task = loop.run_in_executor(p, process_data, chunk, index, chunk_size)
         tasks.append(task)
 
     results = [await task for task in tasks]
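The memory saving comes from the dispatcher no longer building and splitting the full message list in the parent process: each worker now receives only a chunk identifier, its index, and a chunk size. A minimal standalone sketch of that sizing and dispatch logic (CPU_THREADS, the executor p, and the placeholder process_data are assumptions here, standing in for the module globals and real worker used by the commit):

    # Sketch: size the pool and hand each worker (chunk, index, chunk_size),
    # never the messages themselves.
    import asyncio
    from concurrent.futures import ProcessPoolExecutor

    CPU_THREADS = 4  # assumed; the real value comes from configuration
    p = ProcessPoolExecutor(CPU_THREADS)


    def process_data(chunk, index, chunk_size):
        # Placeholder worker: the real one pops chunk_size messages from Redis.
        return []


    async def spawn_processing_threads(chunk, length):
        loop = asyncio.get_event_loop()
        if length < CPU_THREADS * 100:
            cores, chunk_size = 1, length              # small batch: one worker
        else:
            cores, chunk_size = CPU_THREADS, length // CPU_THREADS
        tasks = [
            loop.run_in_executor(p, process_data, chunk, index, chunk_size)
            for index in range(cores)
        ]
        results = await asyncio.gather(*tasks)
        return [item for sublist in results for item in sublist]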
@@ -128,8 +131,8 @@ async def spawn_processing_threads(data):
     flat_list = [item for sublist in results for item in sublist]
     log.debug(
         (
-            f"Results from processing of {len_data} messages in "
-            f"{len(split_data)} threads: {len(flat_list)}"
+            f"[{chunk}/{index}] Results from processing of {length} messages in "
+            f"{cores} threads: {len(flat_list)}"
         )
     )
     await db.store_kafka_batch(flat_list)
@@ -137,7 +140,8 @@ async def spawn_processing_threads(data):
     # log.debug(f"Finished processing {len_data} messages")
 
 
-def process_data(data):
+def process_data(chunk, index, chunk_size):
+    log.debug(f"[{chunk}/{index}] Processing {chunk_size} messages")
     to_store = []
 
     sentiment_time = 0.0
@@ -154,7 +158,11 @@ def process_data(data):
 
     # Initialise sentiment analyser
    analyzer = SentimentIntensityAnalyzer()
-    for msg in data:
+    for msg_index in range(chunk_size):
+        msg = db.r.rpop(KEYNAME)
+        if not msg:
+            return
+        msg = orjson.loads(msg)
         total_start = time.process_time()
         # normalise fields
         start = time.process_time()
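On the worker side, messages are now popped from the Redis list one at a time rather than received as a pre-built slice, so each process holds at most one raw message outside its result list. A self-contained sketch of that loop, assuming the redis-py client, a list named "queue", and JSON-encoded entries (the real code goes through a shared db wrapper and orjson):

    # Sketch: pull up to chunk_size messages from the shared Redis list,
    # stopping early if the queue drains.
    import json

    import redis

    KEYNAME = "queue"
    r = redis.Redis()


    def process_data(chunk, index, chunk_size):
        to_store = []
        for _ in range(chunk_size):
            raw = r.rpop(KEYNAME)
            if raw is None:
                break  # queue exhausted before this worker's quota was met
            msg = json.loads(raw)
            # ... normalise, hash, language-detect, sentiment as in the diff ...
            to_store.append(msg)
        return to_store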
@@ -185,13 +193,16 @@ def process_data(data):
         post_normalised = orjson.dumps(msg, option=orjson.OPT_SORT_KEYS)
         hash = siphash(hash_key, post_normalised)
         hash = str(hash)
-        redis_key = f"cache.{board}.{thread}.{msg['no']}"
+        redis_key = (
+            f"cache.{board}.{thread}.{msg['no']}.{msg['resto']}.{msg['now']}"
+        )
         key_content = db.r.get(redis_key)
-        if key_content:
+        if key_content is not None:
             key_content = key_content.decode("ascii")
             if key_content == hash:
+                # This deletes the message since the append at the end won't be hit
                 continue
                 # pass
             else:
                 msg["type"] = "update"
         db.r.set(redis_key, hash)
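The cache check above normalises the post with sorted keys, hashes it with a keyed siphash, and compares the result to the hash stored under a cache key that now also includes resto and now; an unchanged post is skipped, while a changed one is re-emitted as an update. A sketch of that flow, with hashlib.blake2b standing in for the keyed siphash and a hypothetical key value, purely for a self-contained illustration:

    # Sketch of the cache/update check; blake2b is an illustrative substitute
    # for the crawler's keyed siphash.
    import hashlib

    import orjson
    import redis

    r = redis.Redis()
    hash_key = b"0123456789abcdef"  # hypothetical key


    def is_new_or_updated(msg, board, thread):
        normalised = orjson.dumps(msg, option=orjson.OPT_SORT_KEYS)
        digest = hashlib.blake2b(normalised, key=hash_key).hexdigest()
        redis_key = f"cache.{board}.{thread}.{msg['no']}.{msg['resto']}.{msg['now']}"
        cached = r.get(redis_key)
        if cached is not None:
            if cached.decode("ascii") == digest:
                return False  # unchanged post: skip it
            msg["type"] = "update"  # content changed since last crawl
        r.set(redis_key, digest)
        return True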
@@ -243,7 +254,7 @@ def process_data(data):
             msg["lang_code"] = lang_code
             msg["lang_name"] = lang_name
         except cld2_error as e:
-            log.error(f"Error detecting language: {e}")
+            log.error(f"[{chunk}/{index}] Error detecting language: {e}")
             # So below block doesn't fail
             lang_code = None
         time_took = (time.process_time() - start) * 1000
@@ -277,6 +288,8 @@ def process_data(data):
 
     if MONOLITH_PROCESS_PERFSTATS:
         log.debug("=====================================")
+        log.debug(f"Chunk: {chunk}")
+        log.debug(f"Index: {index}")
         log.debug(f"Sentiment: {sentiment_time}")
         log.debug(f"Regex: {regex_time}")
         log.debug(f"Polyglot: {polyglot_time}")