A dataset of annotation candidates for a grammatical error correction (GEC) task in the Tatar language.
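The filtering script below extracts such candidates from the yasalma/vk-messages corpus: it streams the source data, normalizes whitespace, drops placeholder and URL-only messages, keeps texts that contain enough Tatar-specific letters, splits long messages into sentence-bounded chunks of 5 to 40 words (20 to 300 characters), deduplicates them, and writes the result to filtered_tatar.parquet.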
import hashlib
import os
import re
from typing import Dict, List
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import load_dataset
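# Source dataset, output file, batch sizes, and the text column to filter.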
DATASET_NAME = "yasalma/vk-messages"
DATASET_SPLIT = "train"
OUTPUT_PARQUET = "filtered_tatar.parquet"
READ_BATCH_SIZE = 10_000
WRITE_BATCH_SIZE = 5_000
TEXT_FIELD = "message"
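# Length and letter-count thresholds a message (or chunk) must satisfy.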
MIN_WORDS = 5
MAX_WORDS = 40
MIN_CHARS = 20
MAX_CHARS = 300
MIN_LETTERS = 10
MIN_TATAR_LETTERS = 5
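# Regexes: URLs to strip, Tatar-specific letters, any Latin/Cyrillic letter, and sentence boundaries.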
url_pattern = re.compile(r"(https?://\S+|www\.\S+)")
tatar_letters_pattern = re.compile(r"[ӘәҮүҖҗҢңӨөҺһ]")
letter_pattern = re.compile(r"[A-Za-zА-Яа-яЁёӘәҮүҖҗҢңӨөҺһ]")
sentence_split_re = re.compile(r"(?<=[.!?])\s+")
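# Known placeholder texts to exclude.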
system_msgs = {
"Post is not in text format.",
"Comment is not in text format.",
}
def normalize_series(series: pd.Series) -> pd.Series:
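    """Fill NaNs, cast to str, collapse whitespace runs to single spaces, and strip."""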
return series.fillna("").astype(str).str.replace(r"\s+", " ", regex=True).str.strip()
def split_and_chunk(text: str) -> List[str]:
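    """Split text into sentences and greedily pack them into chunks that stay
    within the word/character limits; keep only chunks that also pass the
    letter-count filters."""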
sentences = [s.strip() for s in sentence_split_re.split(text) if s.strip()]
if not sentences:
return []
chunks: List[str] = []
current: List[str] = []
for s in sentences:
candidate = (" ".join(current + [s])).strip()
if current and (len(candidate.split()) > MAX_WORDS or len(candidate) > MAX_CHARS):
chunks.append(" ".join(current).strip())
current = [s]
else:
current.append(s)
if current:
chunks.append(" ".join(current).strip())
result: List[str] = []
for c in chunks:
words = c.split()
if not (MIN_WORDS <= len(words) <= MAX_WORDS):
continue
if not (MIN_CHARS <= len(c) <= MAX_CHARS):
continue
if len(letter_pattern.findall(c)) < MIN_LETTERS:
continue
if len(tatar_letters_pattern.findall(c)) < MIN_TATAR_LETTERS:
continue
result.append(c)
return result
def process_chunk(df_raw: pd.DataFrame) -> pd.DataFrame:
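    """Normalize and filter a batch of raw rows, split the surviving messages
    into sub-message chunks, and return deduplicated (id, url, message) rows.
    The id is the SHA-256 hex digest of the source url, so chunks taken from
    the same post share an id."""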
if df_raw.empty:
return pd.DataFrame(columns=["id", "url", "message"])
df = pd.DataFrame()
df["url"] = df_raw.get("url", "").fillna("").astype(str)
df["message"] = normalize_series(df_raw[TEXT_FIELD].fillna("").astype(str))
df = df[(df["url"] != "") & (df["message"] != "")]
df = df[~df["message"].isin(system_msgs)]
msg_no_urls = df["message"].str.replace(url_pattern, "", regex=True).str.strip()
df = df[msg_no_urls != ""]
letter_count = df["message"].str.count(letter_pattern)
tatar_count = df["message"].str.count(tatar_letters_pattern)
df = df[(letter_count >= MIN_LETTERS) & (tatar_count >= MIN_TATAR_LETTERS)]
df = df.drop_duplicates(subset="message")
df["submessages"] = df["message"].apply(split_and_chunk)
df = df.drop(columns=["message"])
df = df.explode("submessages")
df = df[df["submessages"].notna() & (df["submessages"].str.strip() != "")]
df = df.rename(columns={"submessages": "message"})
letter_count = df["message"].str.count(letter_pattern)
tatar_count = df["message"].str.count(tatar_letters_pattern)
df = df[(letter_count >= MIN_LETTERS) & (tatar_count >= MIN_TATAR_LETTERS)]
df["id"] = df["url"].apply(lambda u: hashlib.sha256(u.encode("utf-8")).hexdigest())
df = df.drop_duplicates(subset="message").reset_index(drop=True)
return df[["id", "url", "message"]]
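# Optional Hugging Face token from the environment; the source dataset is gated.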
auth_token = (
os.getenv("HF_TOKEN")
or os.getenv("HUGGINGFACEHUB_API_TOKEN")
or os.getenv("HUGGINGFACE_TOKEN")
)
load_kwargs = {
"path": DATASET_NAME,
"split": DATASET_SPLIT,
"streaming": True,
}
if auth_token:
    # Recent `datasets` releases use `token`; the older `use_auth_token` argument is deprecated.
    load_kwargs["token"] = auth_token
dataset = load_dataset(**load_kwargs)
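# Lazily created Parquet writer, read/write buffers, and hashes of already-seen messages for cross-batch deduplication.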
writer = None
raw_buffer: List[Dict[str, str]] = []
out_buffer: List[Dict[str, str]] = []
total_written = 0
seen_hashes = set()
def flush_output():
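    """Write the buffered output rows to the Parquet file (creating the writer
    on first use) and clear the write buffer."""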
    global writer, total_written  # these module-level names are rebound here
if not out_buffer:
return
table = pa.Table.from_pylist(out_buffer)
if writer is None:
writer = pq.ParquetWriter(OUTPUT_PARQUET, table.schema, compression="snappy")
writer.write_table(table)
total_written += len(out_buffer)
out_buffer.clear()
print(f"Wrote {total_written} rows to {OUTPUT_PARQUET}...")
def handle_chunk():
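    """Run the buffered raw rows through process_chunk, skip messages already
    seen in earlier batches, and queue the rest for writing."""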
if not raw_buffer:
return
chunk_df = process_chunk(pd.DataFrame(raw_buffer))
raw_buffer.clear()
for rec in chunk_df.to_dict(orient="records"):
msg_hash = hashlib.md5(rec["message"].encode("utf-8")).digest()
if msg_hash in seen_hashes:
continue
seen_hashes.add(msg_hash)
out_buffer.append(rec)
if len(out_buffer) >= WRITE_BATCH_SIZE:
flush_output()
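# Stream the source dataset, processing the raw rows in batches.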
for row in dataset:
raw_buffer.append(row)
if len(raw_buffer) >= READ_BATCH_SIZE:
handle_chunk()
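# Process and flush whatever remains in the buffers after the stream ends.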
handle_chunk()
flush_output()
if writer:
writer.close()
print(f"Filtered messages saved to '{OUTPUT_PARQUET}', total: {total_written}")
else:
print("No data written; dataset empty after filtering.")