Implement sentiment/NLP annotation and optimise processing
@@ -2,19 +2,13 @@
import asyncio
import random
import string
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from math import ceil

import aiohttp
import ujson
from bs4 import BeautifulSoup
from numpy import array_split
from siphashc import siphash

import db
import util
from schemas.ch4_s import ATTRMAP

# CONFIGURATION #

@@ -30,13 +24,8 @@ CRAWL_DELAY = 5
# Semaphore value ?
THREADS_SEMAPHORE = 1000

# Maximum number of CPU threads to use for post processing
CPU_THREADS = 8

# CONFIGURATION END #

p = ProcessPoolExecutor(CPU_THREADS)
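Despite the name, `THREADS_SEMAPHORE` bounds concurrent HTTP requests rather than OS threads: the `bound_fetch(sem, url, session, mapped)` calls later in this diff point at the standard semaphore-gating pattern, while the module-level `ProcessPoolExecutor` is what fans CPU work out across `CPU_THREADS` processes. A minimal, runnable sketch of the semaphore side (the `fetch` placeholder and names are illustrative, not the repo's code):

```python
import asyncio

THREADS_SEMAPHORE = 1000  # same role as the setting above

async def fetch(url):
    # Placeholder for the aiohttp GET done by Chan4.fetch.
    await asyncio.sleep(0)
    return url

async def bound_fetch(sem, url):
    # At most THREADS_SEMAPHORE bodies run concurrently; the rest
    # wait here instead of opening more sockets.
    async with sem:
        return await fetch(url)

async def main():
    sem = asyncio.Semaphore(THREADS_SEMAPHORE)
    urls = [f"https://example.invalid/{n}" for n in range(10)]
    print(len(await asyncio.gather(*(bound_fetch(sem, u) for u in urls))))

asyncio.run(main())
```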


class Chan4(object):
    """
@@ -83,10 +72,12 @@ class Chan4(object):
        self.log.debug(f"Got boards: {self.boards}")

    async def get_thread_lists(self, boards):
        self.log.debug(f"Getting thread list for {boards}")
        # self.log.debug(f"Getting thread list for {boards}")
        board_urls = {board: f"{board}/catalog.json" for board in boards}
        responses = await self.api_call(board_urls)
        to_get = []
        flat_map = [board for board, thread in responses]
        self.log.debug(f"Got thread list for {flat_map}: {len(responses)}")
        for mapped, response in responses:
            if not response:
                continue
@@ -95,7 +86,6 @@ class Chan4(object):
                no = threads["no"]
                to_get.append((mapped, no))

        self.log.debug(f"Got thread list for {mapped}: {len(response)}")
        if not to_get:
            return
        split_threads = array_split(to_get, ceil(len(to_get) / THREADS_CONCURRENT))
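One subtlety: `array_split` is numpy's (imported above), so `to_get`, a list of `(board, thread_no)` tuples, is first coerced to an array with a common dtype, turning the integer thread numbers into strings. That is harmless here because they are only interpolated into `.../thread/{thread}.json` URLs, but a dependency-free split avoids the coercion entirely (a sketch, not the repo's code):

```python
from math import ceil

def split_chunks(items, n):
    # Rough stand-in for numpy.array_split: slice `items` into runs of
    # ceil(len/n); may yield fewer than n chunks when len % n != 0.
    size = ceil(len(items) / n) or 1
    return [items[i : i + size] for i in range(0, len(items), size)]

to_get = [("g", 123456), ("tv", 789012), ("g", 345678)]
print(split_chunks(to_get, 2))  # [[('g', 123456), ('tv', 789012)], [('g', 345678)]]
```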
@@ -122,46 +112,20 @@ class Chan4(object):
            (board, thread): f"{board}/thread/{thread}.json"
            for board, thread in thread_list
        }
        self.log.debug(f"Getting information for threads: {thread_urls}")
        # self.log.debug(f"Getting information for threads: {thread_urls}")
        responses = await self.api_call(thread_urls)
        self.log.debug(f"Got information for threads: {thread_urls}")
        self.log.debug(f"Got information for {len(responses)} threads")

        all_posts = {}
        for mapped, response in responses:
            if not response:
                continue
            board, thread = mapped
            self.log.debug(f"Got thread content for thread {thread} on board {board}")
            all_posts[mapped] = response["posts"]

        # Split into 10,000 chunks
        if not all_posts:
            return
        await self.handle_posts(all_posts)
        # threads_per_core = int(len(all_posts) / CPU_THREADS)
        # for i in range(CPU_THREADS):
        #     new_dict = {}
        #     pulled_posts = self.take_items(all_posts, threads_per_core)
        #     for k, v in pulled_posts:
        #         if k in new_dict:
        #             new_dict[k].append(v)
        #         else:
        #             new_dict[k] = [v]
        #     await self.handle_posts_thread(new_dict)

        # print("VAL", ceil(len(all_posts) / threads_per_core))
        # split_posts = array_split(all_posts, ceil(len(all_posts) / threads_per_core))
        # print("THREADS PER CORE SPLIT", len(split_posts))
        # # print("SPLIT CHUNK", len(split_posts))
        # for posts in split_posts:
        #     print("SPAWNED THREAD TO PROCESS", len(posts), "POSTS")
        #     await self.handle_posts_thread(posts)

        # await self.handle_posts_thread(all_posts)

    @asyncio.coroutine
    def handle_posts_thread(self, posts):
        loop = asyncio.get_event_loop()
        yield from loop.run_in_executor(p, self.handle_posts, posts)
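Two problems are latent in `handle_posts_thread` (its call sites are commented out above): `@asyncio.coroutine` with `yield from` was deprecated in Python 3.8 and removed in 3.11, and `handle_posts` is now `async def`, which a `ProcessPoolExecutor` cannot usefully run — the child process would get back an un-awaited coroutine, which also fails to pickle. If the executor path is ever revived, the modern shape would be roughly this, where `handle_posts_sync` is a hypothetical plain-function variant of the handler:

```python
async def handle_posts_thread(self, posts):
    loop = asyncio.get_running_loop()
    # Offload CPU-bound work without blocking the event loop. The target
    # must be a regular (sync) function; handle_posts_sync is hypothetical.
    await loop.run_in_executor(p, self.handle_posts_sync, posts)
```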

    async def handle_posts(self, posts):
        to_store = []
@@ -170,50 +134,13 @@ class Chan4(object):
            for index, post in enumerate(post_list):
                posts[key][index]["type"] = "msg"

                # # Calculate hash for post
                # post_normalised = ujson.dumps(post, sort_keys=True)
                # hash = siphash(self.hash_key, post_normalised)
                # hash = str(hash)
                # redis_key = f"cache.{board}.{thread}.{post['no']}"
                # key_content = db.r.get(redis_key)
                # if key_content:
                #     key_content = key_content.decode("ascii")
                #     if key_content == hash:
                #         continue
                #     else:
                #         posts[key][index]["type"] = "update"
                #     #db.r.set(redis_key, hash)

                # for key2, value in list(post.items()):
                #     if key2 in ATTRMAP:
                #         post[ATTRMAP[key2]] = posts[key][index][key2]
                #         del posts[key][index][key2]
                # if "ts" in post:
                #     old_time = posts[key][index]["ts"]
                #     # '08/30/22(Tue)02:25:37'
                #     time_spl = old_time.split(":")
                #     if len(time_spl) == 3:
                #         old_ts = datetime.strptime(old_time, "%m/%d/%y(%a)%H:%M:%S")
                #     else:
                #         old_ts = datetime.strptime(old_time, "%m/%d/%y(%a)%H:%M")
                #     # new_ts = old_ts.isoformat()
                #     new_ts = int(old_ts.timestamp())
                #     posts[key][index]["ts"] = new_ts
                # if "msg" in post:
                #     soup = BeautifulSoup(posts[key][index]["msg"], "html.parser")
                #     msg = soup.get_text(separator="\n")
                #     posts[key][index]["msg"] = msg

                posts[key][index]["src"] = "4ch"
                posts[key][index]["net"] = board
                posts[key][index]["channel"] = thread

                to_store.append(posts[key][index])

                # print({name_map[name]: val for name, val in post.items()})
        # print(f"Got posts: {len(posts)}")
        if to_store:
            print("STORING", len(to_store))
            await db.queue_message_bulk(to_store)
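The commented-out block above is the per-post normalisation this commit migrates out of the crawler: siphash-based change detection against a Redis cache, `ATTRMAP` key renaming, 4chan timestamp parsing, and HTML stripping. For reference, the same steps as one self-contained function — a sketch of the commented logic; the raw 4chan field names `now`/`com` and the 16-character hash key are assumptions, since the repo renames fields via `ATTRMAP`, which is not shown in this diff:

```python
from datetime import datetime

import ujson
from bs4 import BeautifulSoup
from siphashc import siphash

def normalise(post, hash_key="0123456789ABCDEF"):
    # Stable content hash; the commented code compares this against a
    # value cached in Redis under cache.{board}.{thread}.{no}.
    post_hash = str(siphash(hash_key, ujson.dumps(post, sort_keys=True)))

    # Timestamps arrive as '08/30/22(Tue)02:25:37', sometimes without
    # seconds; choose the strptime format by counting colons.
    if "now" in post:
        fmt = "%m/%d/%y(%a)%H:%M:%S" if post["now"].count(":") == 2 else "%m/%d/%y(%a)%H:%M"
        post["ts"] = int(datetime.strptime(post["now"], fmt).timestamp())

    # Comment bodies embed HTML (<br>, spans, quote links); flatten to text.
    if "com" in post:
        post["msg"] = BeautifulSoup(post["com"], "html.parser").get_text(separator="\n")

    return post, post_hash

print(normalise({"no": 1, "now": "08/30/22(Tue)02:25:37", "com": "a<br>b"}))
```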

    async def fetch(self, url, session, mapped):
@@ -238,7 +165,7 @@ class Chan4(object):
        async with aiohttp.ClientSession(connector=connector) as session:
            for mapped, method in methods.items():
                url = f"{self.api_endpoint}/{method}"
                self.log.debug(f"GET {url}")
                # self.log.debug(f"GET {url}")
                task = asyncio.create_task(self.bound_fetch(sem, url, session, mapped))
                # task = asyncio.ensure_future(self.bound_fetch(sem, url, session))
                tasks.append(task)
@@ -1,6 +1,6 @@
import asyncio

import ujson
import orjson

import db
import util
@@ -8,9 +8,13 @@ from processing import process
SOURCES = ["4ch", "irc", "dis"]
KEYPREFIX = "queue."
CHUNK_SIZE = 90000

# Chunk size per source (multiply by len(SOURCES) for the total)
CHUNK_SIZE = 9000
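With three sources and `CHUNK_SIZE = 9000`, one pass of `get_chunk()` can therefore pull up to 27,000 items every `ITER_DELAY` seconds. The heavy lifting is Redis `SPOP` with a count argument, which atomically removes and returns up to that many random set members per call. A stand-alone version of the consuming side — a sketch assuming redis-py's asyncio client and a reachable local Redis; the repo goes through its own `db.ar` wrapper:

```python
import asyncio

import redis.asyncio as redis  # assumed client; the repo wraps its own in db

SOURCES = ["4ch", "irc", "dis"]
KEYPREFIX = "queue."
CHUNK_SIZE = 9000

async def pop_chunks():
    r = redis.Redis()
    items = []
    for source in SOURCES:
        # SPOP key count: pops up to CHUNK_SIZE members in one round trip.
        chunk = await r.spop(f"{KEYPREFIX}{source}", CHUNK_SIZE)
        items.extend(chunk or [])
    return items  # raw bytes, parsed by the ingest loop below

print(asyncio.run(pop_chunks()))
```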
ITER_DELAY = 0.5

log = util.get_logger("ingest")


class Ingest(object):
    def __init__(self):
@@ -18,8 +22,6 @@ class Ingest(object):
        self.log = util.get_logger(name)

    async def run(self):
        # items = [{'no': 23567753, 'now': '09/12/22(Mon)20:10:29', 'name': 'Anonysmous', 'filename': '1644986767568', 'ext': '.webm', 'w': 1280, 'h': 720, 'tn_w': 125, 'tn_h': 70, 'tim': 1663027829301457, 'time': 1663027829, 'md5': 'zeElr1VR05XpZ2XuAPhmPA==', 'fsize': 3843621, 'resto': 23554700, 'type': 'msg', 'src': '4ch', 'net': 'gif', 'channel': '23554700'}]
        # await process.spawn_processing_threads(items)
        while True:
            await self.get_chunk()
            await asyncio.sleep(ITER_DELAY)
@@ -31,11 +33,8 @@ class Ingest(object):
            chunk = await db.ar.spop(key, CHUNK_SIZE)
            if not chunk:
                continue
            # self.log.info(f"Got chunk: {chunk}")
            for item in chunk:
                item = ujson.loads(item)
                # self.log.info(f"Got item: {item}")
                item = orjson.loads(item)
                items.append(item)
        if items:
            print("PROCESSING", len(items))
            await process.spawn_processing_threads(items)
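The `ujson` → `orjson` swap is the other half of the optimisation: `orjson.loads` accepts the raw `bytes` Redis hands back and parses them directly, so the per-item decode step disappears and parsing is typically faster. For example:

```python
import orjson

raw = b'{"no": 23567753, "src": "4ch", "channel": "23554700"}'
item = orjson.loads(raw)  # bytes in, dict out; no .decode() round trip
print(item["no"], item["channel"])
```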