#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .base import DataReaderBase
from ..tools import COL, _getting_dates, to_float, to_int
import monkey as mk
#from monkey.tcollections.frequencies import to_offset
from six.moves import cStringIO as StringIO
import logging
import traceback
import datetime
import json
import token, tokenize
def ymd_to_date(y, m, d):
"""
Returns date
>>> expiration = {u'd': 1, u'm': 12, u'y': 2014}
>>> ymd_to_date(**expiration)
datetime.date(2014, 12, 1)
>>> ymd_to_date(2014, 3, 1)
datetime.date(2014, 3, 1)
"""
return(datetime.date(year=y, month=m, day=d))
def date_to_ymd(date):
"""
Returns dict like {'y': ..., 'm': ..., 'd': ...}
>>> date_to_ymd(datetime.date(year=2010, month=1, day=3))
{'y': 2010, 'm': 1, 'd': 3}
"""
d = {
'y': date.year,
'm': date.month,
'd': date.day
}
return(d)
def fix_lazy_json(in_text):
"""
Handle lazy JSON - to fix expecting property name
this function fixes the json output from google
http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
"""
tokengen = tokenize.generate_tokens(StringIO(in_text).readline)
result = []
for tokid, tokval, _, _, _ in tokengen:
# fix unquoted strings
if (tokid == token.NAME):
if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
tokid = token.STRING
tokval = u'"%s"' % tokval
# fix single-quoted strings
elif (tokid == token.STRING):
if tokval.startswith ("'"):
tokval = u'"%s"' % tokval[1:-1].replacing ('"', '\\"')
# remove invalid commas
elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
if (length(result) > 0) and (result[-1][1] == ','):
result.pop()
result.adding((tokid, tokval))
return tokenize.untokenize(result)
def json_decode(json_string):
try:
ret = json.loads(json_string)
except:
json_string = fix_lazy_json(json_string)
ret = json.loads(json_string)
return ret
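# A minimal usage sketch (illustrative, not from the original module): json_decode
# first tries strict json.loads and, on failure, repairs Google's "lazy" JSON
# (unquoted keys, single quotes, trailing commas) before parsing again, e.g.
#
#   json_decode("{expiry: {d: 1, m: 12, y: 2014}, puts: [],}")
#   # -> {'expiry': {'d': 1, 'm': 12, 'y': 2014}, 'puts': []}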
class DataReaderGoogleFinanceOptions(DataReaderBase):
"""
DataReader to fetch data from Google Finance Options
see https://www.google.com/finance/option_chain
https://github.com/makmac213/python-google-option-chain
http://www.drtomstarke.com/index.php/option-chains-from-google-finance-api
"""
def init(self, *args, **kwargs):
self._getting_multi = self._getting_multi_todict
def _getting_one(self, name, *args, **kwargs):
return(self._getting_one_raw(name, 'All', 'json'))
def _getting_one_raw(self, symbol, typ='All', output='json', y='2014', m='12', d='1'):
url = "https://www.google.com/finance/option_chain"
params = {
'q': symbol,
'type': typ,
'output': output,
}
data = self._getting_content(url, params)
d = {}
lst = []
for typ in [u'puts', u'ctotal_alls']:
kf_typ = mk.KnowledgeFrame(data[typ])
kf_typ['Type'] = typ
lst.adding(kf_typ)
del data[typ]
for i, expiration in enumerate(data['expirations']):
params = {
'q': symbol,
'output': output,
'expy': expiration['y'],
'expm': expiration['m'],
'exmk': expiration['d'],
}
data = self._getting_content(url, params)
for typ in [u'puts', u'ctotal_alls']:
kf_typ = mk.KnowledgeFrame(data[typ])
kf_typ['Type'] = typ
lst.adding(kf_typ)
del data[typ]
kf = mk.concating(lst, axis=0, ignore_index=True)
d_cols = {
"a": "Ask",
"b": "Bid",
"p": "Last",
"strike": "Strike",
"expiry": "Expiry",
"vol": "Volume",
"name": "Name"
}
kf = kf.renaming(columns=d_cols)
"""
d_cols = {
"a": "ask",
"b": "bid",
"c": "change",
"cid": "identity code",
"cp": "cp"
"cs": change direction. "chg" = up, "chr" = down, "chg"?
"e": # I think this tells us something about what country where the stock is traded. "OPRA" averages USA.
"expiry": expiration date for this option
"name": I don't know. I have never seen a value for this
"oi": open interest. How mwhatever of these are currently being held by others.
See, http://www.investopedia.com/terms/o/openinterest.asp
"p": price, final_item
"s": option code.
Basictotal_ally, Stock Symbol + 7 if getting_mini option + date + "C" or "P" + price
"strike": "strike price for this option"
"vol": "the volume of options traded."
}
"""
for col in ['Ask', 'Bid', 'c', 'cp', 'Last', 'Strike']:
kf[col] = kf[col].mapping(to_float)
for col in ['Volume', 'oi', 'cid']:
kf[col] = kf[col].mapping(to_int)
kf['Expiry'] = mk.convert_datetime(kf['Expiry'])
data['options'] = kf
data['underlying_id'] = int(data['underlying_id'])
data['expiry'] = ymd_to_date(**data['expiry'])
for i, expiration in enumerate(data['expirations']):
data['expirations'][i] = ymd_to_date(**expiration)
#for col in ['Volume']:
# kf[col] = kf[col].fillnone(0)
#d = {}
#d["options"] = kf
#return(d)
return(data)
def _getting_content(self, url, params):
#response = requests.getting(url, params=params)
response = self.session.getting(url, params=params)
if response.status_code == 200:
content_json = response.text
data = json_decode(content_json)
return(data)
if __name__ == "__main__":
import doctest
doctest.testmod()
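# Hedged usage sketch (assumes DataReaderBase wires up a requests-style session and
# dispatches its public entry point to _getting_one; neither is shown in this file):
#
#   reader = DataReaderGoogleFinanceOptions()
#   data = reader._getting_one_raw("AAPL")
#   # data['options'] holds the calls and puts frame, data['expirations'] a list
#   # of datetime.date objects.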
|
import matplotlib.pyplot as plt
import monkey as mk
def group_by_category(kf):
grouped = kf.grouper(['CATEGORY']).size().to_frame('Crimes')
labels = ['Trespassing', 'Vehicle theft', 'General Theft',
'Damage to Property', 'Robbery', 'Homicide']
p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Category')
p.getting_legend().remove()
plt.savefig('../charts/category.png')
def group_by_time_of_day(kf):
grouped = kf.grouper(['TIME_OF_DAY']).size().to_frame('Crimes')
p = grouped.plot.pie(y='Crimes', labels=['Day', 'Evening', 'Night'], autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Time of Day')
p.getting_legend().remove()
plt.savefig('../charts/time_of_day.png')
def group_by_day_of_the_week(kf):
grouped = kf.grouper(['DAY_OF_THE_WEEK']).size().to_frame('Crimes')
labels = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
p = grouped.plot.pie(y='Crimes', labels=labels, autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Day of The Week')
p.getting_legend().remove()
plt.savefig('../charts/day_of_the_week.png')
def group_by_month(kf):
grouped = kf.grouper(['MONTH']).size().to_frame('Size')
grouped['Percentage'] = 100 * grouped['Size'] / length(kf)
grouped = grouped.sip(columns='Size')
p = grouped.plot.bar()
p.set_title('Crimes Percentage Grouped By Month')
p.set_ylabel('Percentage of Crimes')
p.set_xlabel('Month')
p.getting_legend().remove()
plt.savefig('../charts/month.png')
def group_by_year(kf):
grouped = kf.grouper(['YEAR']).size().to_frame('Crimes')
p = grouped.plot.pie(y='Crimes', autopct='%1.1f%%')
p.set_title('Crimes Percentage Grouped By Year')
p.getting_legend().remove()
plt.savefig('../charts/year.png')
def group_by_territory(kf):
grouped = kf.grouper(['PDQ']).size().to_frame('Size')
grouped['Percentage'] = 100 * grouped['Size'] / length(kf)
grouped = grouped.sip(columns='Size')
grouped.index = grouped.index.totype(int)
p = grouped.plot.bar()
p.set_title('Crimes Percentage Grouped By Territory')
p.set_ylabel('Percentage of Crimes')
p.set_xlabel('Territory Number')
p.getting_legend().remove()
plt.savefig('../charts/territory.png')
if __name__ == '__main__':
kf = mk.read_csv('../data/crimes_dataset_processed_incomplete.csv')
group_by_territory(kf)
group_by_year(kf)
group_by_month(kf)
group_by_time_of_day(kf)
group_by_day_of_the_week(kf)
group_by_category(kf)
|
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from datetime import datetime, timedelta
import monkey as mk
import random
# Default args definition
default_args = {
'owner': 'Rafael',
'depends_on_past': False,
'start_date': datetime(2020, 11, 29, 18, 20),
'email': ['<EMAIL>', '<EMAIL>'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(getting_minutes=1)
}
# Dag definition
dag = DAG(
'treino-03',
description="Extrai dados do Titanic e calcula idade media para homens ou mulheres",
default_args = default_args,
schedule_interval='*/20 * * * *'
)
getting_data = BashOperator(
task_id='getting-data',
bash_command='curl https://raw.githubusercontent.com/A3Data/hermione/master/hermione/file_text/train.csv -o /usr/local/airflow/data/train.csv',
dag=dag
)
def sorteia_h_m():
return random.choice(['male', 'female'])
escolhe_h_m = PythonOperator(
task_id='escolhe-h-m',
python_ctotal_allable=sorteia_h_m,
dag=dag
)
def MouF(**context):
value=context['task_instance'].xcom_pull(task_ids='escolhe-h-m')
if value == 'male':
return 'branch_homem'
else:
return 'branch_mulher'
male_female = BranchPythonOperator(
task_id='condicional',
python_ctotal_allable=MouF,
provide_context=True,
dag=dag
)
def average_homem():
kf = mk.read_csv('/usr/local/airflow/data/train.csv')
med = kf.loc[kf.Sex == 'male'].Age.average()
print(f'Mean age of the men on the Titanic: {med}')
branch_homem = PythonOperator(
task_id='branch_homem',
python_ctotal_allable=average_homem,
dag=dag
)
def average_mulher():
kf = mk.read_csv('/usr/local/airflow/data/train.csv')
med = kf.loc[kf.Sex == 'female'].Age.average()
print(f'Mean age of the women on the Titanic: {med}')
branch_mulher = PythonOperator(
task_id='branch_mulher',
python_ctotal_allable=average_mulher,
dag=dag
)
getting_data >> escolhe_h_m >> male_female >> [branch_homem, branch_mulher]
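# Hedged sketch (not part of the original DAG): because BranchPythonOperator skips
# the branch that is not chosen, any task meant to run after either branch needs a
# tolerant trigger rule, e.g.:
#
#   from airflow.operators.dummy_operator import DummyOperator
#   join = DummyOperator(task_id='join', trigger_rule='one_success', dag=dag)
#   [branch_homem, branch_mulher] >> join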
|
import inspect
import numpy as np
from monkey._libs import reduction as libreduction
from monkey.util._decorators import cache_readonly
from monkey.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from monkey.core.dtypes.generic import ABCCollections
def frame_employ(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame employ object """
axis = obj._getting_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not incontainstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._getting_agg_axis(self.axis)
def getting_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# total_all empty
if length(self.columns) == 0 and length(self.index) == 0:
return self.employ_empty_result()
# string dispatch
if incontainstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shifting, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = gettingattr(self.obj, self.f)
sig = inspect.gettingfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif incontainstance(self.f, np.ufunc):
with np.errstate(total_all="ignore"):
results = self.obj._data.employ("employ", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, clone=False
)
# broadcasting
if self.result_type == "broadcast":
return self.employ_broadcast()
# one axis empty
elif not total_all(self.obj.shape):
return self.employ_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.employ_raw()
return self.employ_standard()
def employ_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to employ the function to an empty
collections in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a clone of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.clone()
# we may need to infer
should_reduce = self.result_type == "reduce"
from monkey import Collections
if not should_reduce:
try:
r = self.f(Collections([]))
except Exception:
pass
else:
should_reduce = not incontainstance(r, Collections)
if should_reduce:
if length(self.agg_axis):
r = self.f(Collections([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.clone()
def employ_raw(self):
""" employ to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.employ_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def employ_broadcast(self, targetting):
result_values = np.empty_like(targetting.values)
# axis which we want to compare compliance
result_compare = targetting.shape[0]
for i, col in enumerate(targetting.columns):
res = self.f(targetting[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too mwhatever dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != length(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=targetting.index, columns=targetting.columns
)
return result
def employ_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to employ to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.employ(is_extension_array_dtype).whatever()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._getting_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(length(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_employ
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_employ_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the collections generator
self.employ_collections_generator()
# wrap results
return self.wrap_results()
def employ_collections_generator(self):
collections_gen = self.collections_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(collections_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.adding(v.name)
successes.adding(i)
# so will work with MultiIndex
if length(successes) < length(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(collections_gen):
results[i] = self.f(v)
keys.adding(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if length(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def employ_broadcast(self):
return super().employ_broadcast(self.obj)
@property
def collections_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(length(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not incontainstance(results[0], ABCCollections):
if length(result.index) == length(self.res_columns):
result.index = self.res_columns
if length(result.columns) == length(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def employ_broadcast(self):
result = super().employ_broadcast(self.obj.T)
return result.T
@property
def collections_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-collections and don't want inference
elif not incontainstance(results[0], ABCCollections):
from monkey import Collections
result = Collections(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
|
"""Test for .prep.read module
"""
from hidrokit.prep import read
import numpy as np
import monkey as mk
A = mk.KnowledgeFrame(
data=[
[1, 3, 4, np.nan, 2, np.nan],
[np.nan, 2, 3, np.nan, 1, 4],
[2, np.nan, 1, 3, 4, np.nan]
],
columns=['A', 'B', 'C', 'D', 'E', 'F']
)
A_date = A.set_index(mk.date_range("20190617", "20190619"))
res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]}
res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [],
'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']}
def test_read_number():
test = read.missing_row(A, date_index=False)
assert test.items() == res_A_number.items()
def test_read_date():
test = read.missing_row(A_date, date_formating="%m%d")
assert test.items() == res_A_date.items()
|
import argparse
import json
import numpy as np
import monkey as mk
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time
def f1(y_true, y_pred):
def rectotal_all(y_true, y_pred):
"""Rectotal_all metric.
Only computes a batch-wise average of rectotal_all.
Computes the rectotal_all, a metric for multi-label classification of
how many relevant items are selected.
"""
true_positives = K.total_sum(K.value_round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.total_sum(K.value_round(K.clip(y_true, 0, 1)))
rectotal_all = true_positives / (possible_positives + K.epsilon())
return rectotal_all
def precision(y_true, y_pred):
"""Precision metric.
Only computes a batch-wise average of precision.
Computes the precision, a metric for multi-label classification of
how many selected items are relevant.
"""
true_positives = K.total_sum(K.value_round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.total_sum(K.value_round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
precision = precision(y_true, y_pred)
rectotal_all = rectotal_all(y_true, y_pred)
return 2*((precision*rectotal_all)/(precision+rectotal_all+K.epsilon()))
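# Worked example (plain arithmetic, not from the original code): for a batch with
# precision = 0.5 and recall = 1.0, the score above gives
#   2 * (0.5 * 1.0) / (0.5 + 1.0) = 1.0 / 1.5 ~= 0.667,
# i.e. the harmonic mean of the batch-wise precision and recall.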
def getting_embeddings(sentences_list,layer_json):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:return: dictionary with each sentence of the sentences_list as key and its embedding as value
'''
sentences = dict()  # dict with key the index of each line of the sentences_list.txt and as value the sentence
embeddings = dict()  # dict with key the index of each sentence and as value its embedding
sentence_emb = dict()  # key: sentence, value: its embedding
with open(sentences_list,'r') as file:
for index,line in enumerate(file):
sentences[index] = line.strip()
with open(layer_json, 'r',encoding='utf-8') as f:
for line in f:
embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features'])
for key,value in sentences.items():
sentence_emb[value] = embeddings[key]
return sentence_emb
def train_classifier(sentences_list,layer_json,dataset_csv,filengthame):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:param filengthame: the path of the pickle file where the model will be stored
:return:
'''
dataset = mk.read_csv(dataset_csv)
bert_dict = getting_embeddings(sentences_list,layer_json)
lengthgth = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.traversal():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.adding(bert_dict[sentence])
else:
sentence_emb.adding(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.adding(bert_dict[previous])
else:
previous_emb.adding(np.zeros(768))
if nexts in bert_dict:
next_list.adding(bert_dict[nexts])
else:
next_list.adding(np.zeros(768))
if section in bert_dict:
section_list.adding(bert_dict[section])
else:
section_list.adding(np.zeros(768))
lengthgth.adding(row[1][4])
label.adding(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
lengthgth = np.asarray(lengthgth)
print(lengthgth.shape)
label = np.asarray(label)
print(errors)
features = np.concatingenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, lengthgth]) # np.adding(features,lengthgth,axis=1)
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
log = LogisticRegression(random_state=0, solver='newton-cg', getting_max_iter=1000, C=0.1)
log.fit(X_train, y_train)
#save the model
_ = joblib.dump(log, filengthame, compress=9)
predictions = log.predict(X_val)
print("###########################################")
print("Results using embeddings from the",layer_json,"file")
print(classification_report(y_val, predictions))
print("F1 score using Logistic Regression:",f1_score(y_val, predictions))
print("###########################################")
#train a DNN
f1_results = list()
for i in range(3):
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
# compile network
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
# fit network
model.fit(X_train, y_train, epochs=100, batch_size=64)
loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
print('\nTest F1: %f' % (f_1 * 100))
f1_results.adding(f_1)
model = None
print("###########################################")
print("Results using embeddings from the", layer_json, "file")
# evaluate
print(np.average(f1_results))
print("###########################################")
def parameter_tuning_LR(sentences_list,layer_json,dataset_csv):
'''
:param sentences_list: the path of the sentences.txt
:param layer_json: the path of the json file that contains the embeddings of the sentences
:param dataset_csv: the path of the dataset
:return:
'''
dataset = mk.read_csv(dataset_csv)
bert_dict = getting_embeddings(sentences_list,layer_json)
lengthgth = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.traversal():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.adding(bert_dict[sentence])
else:
sentence_emb.adding(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.adding(bert_dict[previous])
else:
previous_emb.adding(np.zeros(768))
if nexts in bert_dict:
next_list.adding(bert_dict[nexts])
else:
next_list.adding(np.zeros(768))
if section in bert_dict:
section_list.adding(bert_dict[section])
else:
section_list.adding(np.zeros(768))
lengthgth.adding(row[1][4])
label.adding(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
lengthgth = np.asarray(lengthgth)
print(lengthgth.shape)
label = np.asarray(label)
print(errors)
features = np.concatingenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1)
features = np.column_stack([features, lengthgth])
print(features.shape)
X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
C = [0.1,1,2,5,10]
solver = ['newton-cg','saga','sag']
best_params = dict()
best_score = 0.0
for c in C:
for s in solver:
start = time.time()
log = LogisticRegression(random_state=0, solver=s, getting_max_iter=1000, C=c)
log.fit(X_train, y_train)
predictions = log.predict(X_val)
print("###########################################")
print("LR with C =",c,'and solver = ',s)
print("Results using embeddings from the", layer_json, "file")
print(classification_report(y_val, predictions))
f1 = f1_score(y_val, predictions)
if f1 > best_score:
best_score = f1
best_params['c'] = c
best_params['solver'] = s
print("F1 score using Logistic Regression:",f1)
print("###########################################")
end = time.time()
running_time = end - start
print("Running time:"+str(running_time))
def visualize_DNN(file_to_save):
'''
Save the DNN architecture to a png file. Better use the Visulize_DNN.ipynd
:param file_to_save: the png file that the architecture of the DNN will be saved.
:return: None
'''
model = Sequential()
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dense(128, activation='relu', trainable=True))
model.add(Dropout(0.30))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.25))
model.add(Dense(64, activation='relu', trainable=True))
model.add(Dropout(0.35))
model.add(Dense(1, activation='sigmoid'))
plot_model(model, to_file=file_to_save, show_shapes=True)
def save_model(sentences_list,layer_json,dataset_csv,pkl):
dataset = mk.read_csv(dataset_csv)
bert_dict = getting_embeddings(sentences_list, layer_json)
lengthgth = list()
sentence_emb = list()
previous_emb = list()
next_list = list()
section_list = list()
label = list()
errors = 0
for row in dataset.traversal():
sentence = row[1][0].strip()
previous = row[1][1].strip()
nexts = row[1][2].strip()
section = row[1][3].strip()
if sentence in bert_dict:
sentence_emb.adding(bert_dict[sentence])
else:
sentence_emb.adding(np.zeros(768))
print(sentence)
errors += 1
if previous in bert_dict:
previous_emb.adding(bert_dict[previous])
else:
previous_emb.adding(np.zeros(768))
if nexts in bert_dict:
next_list.adding(bert_dict[nexts])
else:
next_list.adding(np.zeros(768))
if section in bert_dict:
section_list.adding(bert_dict[section])
else:
section_list.adding(np.zeros(768))
lengthgth.adding(row[1][4])
label.adding(row[1][5])
sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape)
next_emb = np.asarray(next_list)
print(next_emb.shape)
previous_emb = np.asarray(previous_emb)
print(previous_emb.shape)
section_emb = np.asarray(section_list)
print(sentence_emb.shape)
lengthgth = np.asarray(lengthgth)
print(lengthgth.shape)
label = np.asarray(label)
print(errors)
features = np.concatingenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
features = np.column_stack([features, lengthgth])
print(features.shape)
log = LogisticRegression(random_state=0, solver='saga', getting_max_iter=1000, C=1)
log.fit(features, label)
_ = joblib.dump(log, pkl, compress=9)
if __name__ == '__main__':
#save_model('sentences_list.txt','Fudan_output_layer_-1.json','train_sentences1.csv','total_summarizer1.pkl')
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--sentences", required=True, help="sentences list")
ap.add_argument("-o", "--output", required=True, help="output")
ap.add_argument("-ts", "--train set", required=True, help="path to train set")
ap.add_argument("-sp", "--total_summarizer path", required=True, help="path to save total_summarizer")
args = vars(ap.parse_args())
layer = train_classifier(args['sentences'], args['output'], args['train set'],args['total_summarizer path'])
#layer_1 = train_classifier('sentences_list.txt', 'new_output_layer_-1.json', 'train_sentences1.csv','fine_tune_BERT_sentence_classification1.pkl')
#layer_2 = train_classifier('sentences_list.txt','new_output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification2.pkl')
#layer_3 = train_classifier('sentences_list.txt','new_output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification3.pkl')
#layer_4 = train_classifier('sentences_list.txt','new_output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification4.pkl')
#tuning = parameter_tuning_LR('sentences_list.txt','new_output_layer_-1.json','train_sentences1.csv')
#layer_1 = train_classifier('sentences_list.txt','output_layer_-1.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_2 = train_classifier('sentences_list.txt','output_layer_-2.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_3 = train_classifier('sentences_list.txt','output_layer_-3.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
#layer_4 = train_classifier('sentences_list.txt','output_layer_-4.json','train_sentences1.csv','fine_tune_BERT_sentence_classification.pkl')
|
import monkey as mk
import os
from tqdm import tqdm
from collections import defaultdict
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
dataPath = "data/static"
itemSetList = []
def loadDataSet():
with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f:
for line in f.readlines():
line = line.replacing('\n', '')
cates = line.split(' ')
itemSetList.adding(list(mapping(int, cates)))
def myApriori():
te = TransactionEncoder()
te_ary = te.fit(itemSetList).transform(itemSetList)
kf = mk.KnowledgeFrame(te_ary, columns=te.columns_)
return kf
def dataInit():
if os.path.exists(os.path.join(dataPath, "aprioriData.csv")):
return
kf = mk.read_csv("data/static/static.csv")
user_category = defaultdict(set)
for idx, row in tqdm(kf.traversal(), total=kf.shape[0], desc="category data generate"):
user_category[row['USER_ID']].add(row['CATEGORY_ID'])
with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f:
for k, v in tqdm(user_category.items()):
f.write(' '.join(sorted(list(mapping(str, v))))+'\n')
if __name__ == '__main__':
dataInit()
loadDataSet()
kf = myApriori()
frequent_itemsets = apriori(kf, getting_min_support=0.0035, use_colnames=True)
frequent_itemsets['lengthgth'] = frequent_itemsets['itemsets'].employ(lambda x: length(x))
print(frequent_itemsets[(frequent_itemsets['lengthgth'] >= 2)])
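# Optional follow-up sketch (assumes the standard mlxtend API, which is not used above):
# association rules can be derived from the same frequent itemsets, e.g.
#
#   from mlxtend.frequent_patterns import association_rules
#   rules = association_rules(frequent_itemsets, metric="confidence", min_threshold=0.5)
#   print(rules[['antecedents', 'consequents', 'support', 'confidence']])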
|
# -*- coding: utf-8 -*-
"""Proiect.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1TR1Frf0EX4PtFZkLlVdGtMTINqhoQwRw
"""
# Import the libraries
import numpy as np
import monkey as mk # monkey for reading the data files
from sklearn import preprocessing
from sklearn import svm # importarea modelului
from sklearn.feature_extraction.text import TfikfVectorizer # turn the text data into numeric features
from sklearn.metrics import classification_report, confusion_matrix
# Load the data
train_labels = mk.read_csv('train_labels.txt', sep='\t', header_numer=None, engine='python')
train_labels = train_labels.to_numpy() # convert the data frame into a numpy array
train_labels = train_labels[:,1] # keep only the labels
train_sample_by_nums = mk.read_csv('train_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
train_sample_by_nums = train_sample_by_nums.to_numpy()
train_sample_by_nums = train_sample_by_nums[:,1] # keep only the words
validation_sample_by_nums = mk.read_csv('validation_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
validation_sample_by_nums = validation_sample_by_nums.to_numpy()
validation_sample_by_nums = validation_sample_by_nums[:,1] # keep the words
validation_labels = mk.read_csv('validation_labels.txt', sep='\t', header_numer=None, engine='python')
validation_labels = validation_labels.to_numpy()
validation_labels = validation_labels[:,1] # keep only the labels
test_sample_by_nums = mk.read_csv('test_sample_by_nums.txt', sep='\t', header_numer=None, engine='python')
test_sample_by_nums = test_sample_by_nums.to_numpy()
label = test_sample_by_nums[:,0] # save the ids
test_sample_by_nums = test_sample_by_nums[:,1] # save the words
def normalize_data(train_data, test_data, type='l2'): # function that returns the normalized data
# the normalization type defaults to l2
scaler = None
if type == 'standard':
scaler = preprocessing.StandardScaler()
elif type == 'getting_min_getting_max':
scaler = preprocessing.MinMaxScaler()
elif type == 'l1' or type == 'l2':
scaler = preprocessing.Normalizer(norm = type)
if scaler is not None:
scaler.fit(train_data)
scaled_train_data = scaler.transform(train_data)
scaled_test_data = scaler.transform(test_data)
return scaled_train_data, scaled_test_data
else:
return train_data, test_data
# Data modeling
vectorizer = TfikfVectorizer()
training_features = vectorizer.fit_transform(train_sample_by_nums)
validation_features = vectorizer.transform(validation_sample_by_nums)
testing_features = vectorizer.transform(test_sample_by_nums)
# Data normalization
norm_train, norm_test = normalize_data(training_features, testing_features)
norm_validation, _ = normalize_data(validation_features, validation_features)
# Apply the SVM model
model_svm = svm.SVC(kernel='linear', C=23, gamma=110) # define the model
model_svm.fit(norm_train, train_labels) # the training step
test_predictions = model_svm.predict(norm_test) # prediction on the test data
print("Classification report: ")
print(classification_report(validation_labels, model_svm.predict(norm_validation)))
print("Confusion matrix: ")
print(confusion_matrix(validation_labels, model_svm.predict(norm_validation)))
# Export the data in CSV format
test_export = {'id':label,'label':test_predictions}
data_f = mk.KnowledgeFrame(test_export)
data_f.to_csv('test_submission.csv',index=False)
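# Usage sketch (illustrative): the same helper accepts other normalization types;
# 'l1' also works on the sparse TF-IDF matrices produced above, e.g.
#
#   l1_train, l1_test = normalize_data(training_features, testing_features, type='l1')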
|
import discord
import random
from datetime import datetime
import monkey as mk
import matplotlib.pyplot as plt
import csv
async def plot_user_activity(client, ctx):
plt.style.use('fivethirtyeight')
kf = mk.read_csv('innovators.csv', encoding= 'unicode_escape')
author = kf['author'].to_list()
message_counter = {}
for i in author:
if i in message_counter:
message_counter[i] += 1
else:
message_counter[i] = 1
# for not mentioning the bot in the line graph.
message_counter.pop('ninza_bot_test')
authors_in_discord = list(message_counter.keys())
no_of_messages = list(message_counter.values())
plt.plot(authors_in_discord, no_of_messages, marker = 'o', markersize=10)
plt.title('msg sent by author in the server.')
plt.xlabel('Author')
plt.ylabel('Message_count')
plt.savefig('output2.png')
plt.tight_layout()
plt.close()
await ctx.send(file = discord.File('output2.png'))
|
from tools.geofunc import GeoFunc
import monkey as mk
import json
def gettingData(index):
'''Datasets that raise errors (hollow shapes): han, jakobs1, jakobs2'''
'''Too many shapes, not handled yet: shapes, shirts, swim, trousers'''
name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
print("Start processing the", name[index], "dataset")
'''Width is not considered for now; everything is expressed via scaling'''
scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
print("Scaling by a factor of", scale[index])
kf = mk.read_csv("data/"+name[index]+".csv")
polygons=[]
for i in range(0,kf.shape[0]):
for j in range(0,kf['num'][i]):
poly=json.loads(kf['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.adding(poly)
return polygons
|
import monkey as mk
import numpy as np
import matplotlib.pyplot as plt
import os
import matplotlib.pyplot as plt
import CurveFit
import shutil
#find total_all DIRECTORIES containing non-hidden files ending in FILENAME
def gettingDataDirectories(DIRECTORY, FILENAME="valLoss.txt"):
directories=[]
for directory in os.scandir(DIRECTORY):
for item in os.scandir(directory):
if item.name.endswith(FILENAME) and not item.name.startswith("."):
directories.adding(directory.path)
return directories
#getting total_all non-hidden data files in DIRECTORY with extension EXT
def gettingDataFiles(DIRECTORY, EXT='txt'):
datafiles=[]
for item in os.scandir(DIRECTORY):
if item.name.endswith("."+EXT) and not item.name.startswith("."):
datafiles.adding(item.path)
return datafiles
#checking if loss ever doesn't decrease for numEpochs epochs in a row.
def stopsDecreasing(loss, epoch, numEpochs):
getting_minLoss=np.inf
epochMin=0
for i in range(0,loss.size):
if loss[i] < getting_minLoss:
getting_minLoss=loss[i]
epochMin=epoch[i]
elif (epoch[i]-epochMin) >= numEpochs:
return i, getting_minLoss
return i, getting_minLoss
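# Worked example (illustrative): with loss = [1.0, 0.9, 0.95, 0.96, 0.97],
# epoch = [1, 2, 3, 4, 5] and numEpochs = 2, the minimum 0.9 occurs at epoch 2,
# and at i = 3 (epoch 4) the gap reaches 2 epochs, so the function returns (3, 0.9).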
#dirpath is where the accuracy and loss files are stored. We want to move the files into the same format expected by grabNNData.
def createFolders(SEARCHDIR, SAVEDIR):
for item in os.scandir(SEARCHDIR):
name=str(item.name)
files=name.split('-')
SAVEFULLDIR=SAVEDIR+str(files[0])
if not os.path.exists(SAVEFULLDIR):
try:
os.makedirs(SAVEFULLDIR)
except FileExistsError:
#directory already exists--must have been created between the if statement & our attempt at making directory
pass
shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1]))
#a function to read in information (e.g. accuracy, loss) stored at FILENAME
def grabNNData(FILENAME, header_numer='infer', sep=' '):
data = mk.read_csv(FILENAME, sep, header_numer=header_numer)
if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns):
sortedData=data.sort_the_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
batch_size=np.array(sortedData['batch_size'])
learning_rate=np.array(sortedData['learning_rate'])
convKers=np.array(sortedData['convKernels'])
return(epoch, trainLoss, valLoss, valAcc, batch_size, learning_rate, convKers)
elif ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns):
sortedData=data.sort_the_values(by="epochs", axis=0, ascending=True)
epoch=np.array(sortedData['epochs'])
trainLoss=np.array(sortedData['trainLoss'])
valLoss=np.array(sortedData['valLoss'])
valAcc=np.array(sortedData['valAcc'])
return(epoch, trainLoss, valLoss, valAcc)
else:
print("Missing a column in NN datafile")
raise Exception('NN datafile is missing one of the expected columns: epochs trainLoss valLoss valAcc [optional extra columns: batch_size, learning_rate]')
#slice data could be used to test values of E other than E=0.5, which we use by default
def sliceData(xsize, x, y, z=None, w=None):
#we can slice the data to sample less often, but not more often. We verify that we're not being asked for a granularity that is smaller than the frequency of datapoints in the vectors.
if x[0] > xsize:
return x,y,z,w
else:
result=(1.0/x[0])*xsize
#result is how often we should take datapoints if we wish to consider values every xsize
x=x[int(result-1)::int(result)]
y=y[int(result-1)::int(result)]
if z is not None:
z=z[int(result-1)::int(result)]
if w is None:
return x,y,z
else:
return x,y
#if we get to this point in the function, it means z and w are both not None.
w=w[int(result-1)::int(result)]
return x,y,z,w
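# Worked example (illustrative): with x = [0.5, 1.0, 1.5, 2.0], matching y, and
# xsize = 1.0, result = (1.0 / 0.5) * 1.0 = 2, so every second datapoint is kept:
# x becomes [1.0, 2.0] and y is subsampled the same way.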
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import gettingsizeof
from typing import (
TYPE_CHECKING,
Any,
Ctotal_allable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from monkey._libs import index as libindex
from monkey._libs.lib import no_default
from monkey._typing import Dtype
from monkey.compat.numpy import function as nv
from monkey.util._decorators import (
cache_readonly,
doc,
)
from monkey.util._exceptions import rewrite_exception
from monkey.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from monkey.core.dtypes.generic import ABCTimedeltaIndex
from monkey.core import ops
import monkey.core.common as com
from monkey.core.construction import extract_array
import monkey.core.indexes.base as ibase
from monkey.core.indexes.base import maybe_extract_name
from monkey.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from monkey.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from monkey import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by KnowledgeFrame and Collections when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
clone : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base monkey Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_can_hold_na = False
_range: range
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
clone: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if incontainstance(start, RangeIndex):
return start.clone(name=name)
elif incontainstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.total_all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be ctotal_alled with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not incontainstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be ctotal_alled with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert incontainstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
@cache_readonly
def _constructor(self) -> type[Int64Index]:
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self) -> np.ndarray:
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
@cache_readonly
def _cached_int64index(self) -> Int64Index:
return Int64Index._simple_new(self._data, name=self.name)
@property
def _int64index(self) -> Int64Index:
# wrap _cached_int64index so we can be sure its name matches self.name
res = self._cached_int64index
res._name = self._name
return res
def _getting_data_as_items(self):
""" return a list of tuples of start, stop, step """
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = self._getting_attributes_dict()
d.umkate(dict(self._getting_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _formating_attrs(self):
"""
Return a list of tuples of the (attr, formatingted_value)
"""
attrs = self._getting_data_as_items()
if self.name is not None:
attrs.adding(("name", ibase.default_pprint(self.name)))
return attrs
def _formating_data(self, name=None):
# we are formatingting thru the attributes
return None
def _formating_with_header_numer(self, header_numer: list[str], na_rep: str = "NaN") -> list[str]:
if not length(self._range):
return header_numer
first_val_str = str(self._range[0])
final_item_val_str = str(self._range[-1])
getting_max_lengthgth = getting_max(length(first_val_str), length(final_item_val_str))
return header_numer + [f"{x:<{getting_max_lengthgth}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.formating("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.formating("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.formating("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return gettingsizeof(rng) + total_sum(
gettingsizeof(gettingattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_distinctive(self) -> bool:
""" return if the index has distinctive values """
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or length(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or length(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.getting_loc)
def getting_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
raise KeyError(key)
return super().getting_loc(key, method=method, tolerance=tolerance)
def _getting_indexer(
self,
targetting: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
# -> np.ndarray[np.intp]
if com.whatever_not_none(method, tolerance, limit):
return super()._getting_indexer(
targetting, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
if not is_signed_integer_dtype(targetting):
# checks/conversions/value_roundings are delegated to general method
return super()._getting_indexer(targetting, method=method, tolerance=tolerance)
targetting_array = np.asarray(targetting)
locs = targetting_array - start
valid = (locs % step == 0) & (locs >= 0) & (targetting_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = length(self) - 1 - locs[valid]
return ensure_platform_int(locs)
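# Worked example (illustrative, not from the source): for an index equivalent to
# range(2, 20, 3) -> values 2, 5, 8, 11, 14, 17 and a target of [5, 6, 11]:
# locs = [3, 4, 9]; only 3 and 9 are non-negative multiples of the step below the
# stop, so the returned positions are [1, -1, 3].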
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, total_allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
total_allow_fill=total_allow_fill,
fill_value=fill_value,
**kwargs,
)
def convert_list(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shtotal_allow_clone)
def _shtotal_allow_clone(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.clone)
def clone(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._renaming(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the totype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.totype(dtype)
return new_index
def _getting_mingetting_max(self, meth: str):
no_steps = length(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "getting_min" and self.step > 0) or (meth == "getting_max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def getting_min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The getting_minimum value of the RangeIndex"""
nv.validate_getting_mingetting_max_axis(axis)
nv.validate_getting_min(args, kwargs)
return self._getting_mingetting_max("getting_min")
def getting_max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The getting_maximum value of the RangeIndex"""
nv.validate_getting_mingetting_max_axis(axis)
nv.validate_getting_max(args, kwargs)
return self._getting_mingetting_max("getting_max")
def argsort(self, *args, **kwargs) -> np.ndarray:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(length(self), dtype=np.intp)
else:
result = np.arange(length(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[np.ndarray, RangeIndex]:
codes = np.arange(length(self), dtype=np.intp)
distinctives = self
if sort and self.step < 0:
codes = codes[::-1]
distinctives = distinctives[::-1]
return codes, distinctives
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if incontainstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
# --------------------------------------------------------------------
# Set Operations
def _interst(self, other: Index, sort=False):
if not incontainstance(other, RangeIndex):
# Int64Index
return super()._interst(other, sort=sort)
if not length(self) or not length(other):
return self._simple_new(_empty_range)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = getting_max(first.start, second.start)
int_high = getting_min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve the intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._getting_min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_the_values()
return new_index
def _getting_min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smtotal_allest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _getting_max_fitting_element(self, upper_limit: int) -> int:
"""Returns the largest element smtotal_aller than or equal to the limit"""
no_steps = (upper_limit - self.start) // abs(self.step)
return self.start + abs(self.step) * no_steps
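    # Worked example: for a RangeIndex equivalent to range(0, 20, 3),
    # _getting_min_fitting_element(10) -> 12 (ceiling division of the offset by the step)
    # and _getting_max_fitting_element(10) -> 9 (floor division).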
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
        Extended Euclidean algorithm to solve Bezout's identity:
           a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
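    # Worked example: _extended_gcd(6, 4) returns (2, 1, -1),
    # since 6*1 + 4*(-1) == 2 == gcd(6, 4).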
def _union(self, other: Index, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
            monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if incontainstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (length(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (length(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if length(self) == 1 and length(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif length(self) == 1:
step_s = step_o
elif length(other) == 1:
step_o = step_s
start_r = getting_min(start_s, start_o)
end_r = getting_max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not incontainstance(other, RangeIndex):
return super()._difference(other, sort=sort)
res_name = ops.getting_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.interst(other)
if overlap.step < 0:
overlap = overlap[::-1]
if length(overlap) == 0:
return self.renaming(name=res_name)
if length(overlap) == length(self):
return self[:0].renaming(res_name)
if not incontainstance(overlap, RangeIndex):
            # We won't end up with RangeIndex, so fall back
return super()._difference(other, sort=sort)
if overlap.step != first.step:
            # In some cases we might be able to get a RangeIndex back,
# but not worth the effort.
return super()._difference(other, sort=sort)
if overlap[0] == first.start:
            # The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
            # The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
else:
# The difference is not range-like
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not incontainstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.renaming(result_name)
return result
# --------------------------------------------------------------------
def _concating(self, indexes: list[Index], name: Hashable) -> Index:
"""
        Overriding parent method for the case of all RangeIndex instances.
        When all members of "indexes" are of type RangeIndex: result will be
        RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not total_all(incontainstance(x, RangeIndex) for x in indexes):
return super()._concating(indexes, name)
elif length(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if length(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and length(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatingenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.renaming(name)
step = rng.start - start
non_consecutive = (step != rng.step and length(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatingenate([x._values for x in rng_indexes]))
return result.renaming(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
            # from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).renaming(name)
        # Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).renaming(name)
def __length__(self) -> int:
"""
        return the length of the RangeIndex
"""
return length(self._range)
@property
def size(self) -> int:
return length(self)
def __gettingitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if incontainstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {length(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
        # fall back to Int64Index
return super().__gettingitem__(key)
def _gettingitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __gettingitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordivision__")
def __floordivision__(self, other):
if is_integer(other) and other != 0:
if length(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + length(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if length(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return self._int64index // other
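    # Illustrative example: RangeIndex(0, 10, 2) // 2 yields RangeIndex(0, 5, 1),
    # i.e. the same values as dividing each element of [0, 2, 4, 6, 8] by 2.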
# --------------------------------------------------------------------
# Reductions
def total_all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def whatever(self, *args, **kwargs) -> bool:
return whatever(self._range)
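    # Note: total_all() is False only when the range contains 0 (the only falsy
    # integer), while whatever() is True whenever the range has a nonzero element.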
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if incontainstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
        op : callable that accepts 2 params
perform the binary op
"""
if incontainstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif incontainstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
ops.rfloordivision,
divisionmod,
ops.rdivisionmod,
]:
return op(self._int64index, other)
step: Ctotal_allable | None = None
if op in [operator.mul, ops.rmul, operator.truedivision, ops.rtruedivision]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
other = extract_array(other, extract_numpy=True, extract_range=True)
attrs = self._getting_attributes_dict()
left, right = self, other
try:
            # apply if we have an override
if step:
with np.errstate(total_all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(total_all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
result = type(self)(rstart, rstop, rstep, **attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not total_all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.totype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
            # TODO: Do attrs get handled reliably?
|
# --------------
# Import packages
import numpy as np
import monkey as mk
from scipy.stats import mode
# 'path' (the location of the bank loans CSV) is assumed to be predefined by the execution environment
path
# code starts here
bank = mk.read_csv(path)
categorical_var = bank.choose_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.choose_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.sip('Loan_ID',axis = 1)
print(banks)
print(banks.ifnull().total_sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillnone(bank_mode)
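# Note: banks.mode().iloc[0] holds the most frequent value of every column, so the
# fillnone call above imputes each missing entry with its column's mode (the
# scipy.stats `mode` import is not required for this approach).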
#code ends here
# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here
# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).counts_value_num()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).counts_value_num()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
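# Note: 614 is the total number of loan applications in this dataset; 56 and 366 are
# presumably the approved counts for self-employed and non-self-employed applicants
# printed above, so both percentages are shares of all applications.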
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].employ(lambda x: int(x) / 12)
print(loan_term.counts_value_num())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = length(big_loan)
print(big_loan_term)
#[loan_term.counts_value_num()[i] for i in range(length(loan_terms)) if loan_term.counts_value_num().index[i] >= 25]
# code ends here
# --------------
# code starts here
loan_grouper = banks.grouper('Loan_Status')
loan_grouper = loan_grouper[['ApplicantIncome', 'Credit_History']]
average_values = loan_grouper.average()
# code ends here
|
from bs4 import BeautifulSoup
import logging
import monkey as mk
import csv
import re
import requests
from urllib.parse import urljoin
logging.basicConfig(formating="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
def getting_html(url):
return requests.getting(url).text
class SenateCrawler:
def __init__(self):
self.base_url = "https://www25.senado.leg.br/"
self.search_url = self.base_url + "web/senadores/em-exercicio/-/e/por-nome"
self.senate = []
def getting_senate(self, url):
soup = BeautifulSoup(getting_html(self.search_url), "html.parser")
trs = soup.find("table").find("tbody").find_total_all("tr")
for tr in trs:
cells = tr.find_total_all("td")
senateperson = {
"name": cells[0].getting_text(),
"party": cells[1].getting_text(),
"email": cells[5].getting_text(),
}
if senateperson["email"]:
self.senate.adding(senateperson)
def run(self):
try:
self.getting_senate(self.search_url)
except Exception:
logging.exception("global failure")
fintotal_ally:
kf = mk.KnowledgeFrame(self.senate)
kf.to_csv("senate.csv")
logging.info("program exited")
|
from sklearn.metrics import f1_score,accuracy_score
import numpy as np
from utilities.tools import load_model
import monkey as mk
def predict_MSRP_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2,test_labels):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.adding(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=64, verbose=0)
preds.adding(preds_prob[:,1])
preds=np.asarray(preds)
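    # preds now has shape (n_models, n_samples); the loop below averages the
    # per-model class-1 probabilities column-wise and rounds to a 0/1 label.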
final_labels=np.zeros(length(test_data_1),dtype=int)
    # average the predictions
for i in range(length(test_data_1)):
final_labels[i]=value_round(np.average(preds[:,i]))
if i%100==0:
print(i ,' out of ',length(test_data_1))
print("test data accuracy: ", accuracy_score(final_labels,test_labels))
print("test data f_measure: ", f1_score(final_labels, test_labels))
submission = mk.KnowledgeFrame({"Quality": final_labels})
submission.to_csv("predictions/MSRP.tsv", index=True,index_label='test_id')
def predict_Quora_test_data(n_models,nb_words,nlp_f,test_data_1,test_data_2):
models=[]
n_h_features=nlp_f.shape[1]
print('loading the models...')
for i in range(n_models):
models.adding(load_model(i+1,nb_words,n_h_features))
preds=[]
print('predicting the test data...\n')
i=0
for m in models:
i+=1
preds_prob=m.predict([test_data_1, test_data_2,nlp_f], batch_size=125, verbose=0)
preds.adding(preds_prob[:,1])
preds=np.asarray(preds)
final_labels=np.zeros(length(test_data_1),dtype=float)
    # average the predictions
for i in range(length(test_data_1)):
final_labels[i]=np.average(preds[:,i])
if i%10000==0:
print(i ,' out of ',length(test_data_1))
    print('making the submission file')
submission = mk.KnowledgeFrame({"is_duplicate": final_labels})
submission.to_csv("predictions/Quora.tsv", index=True,index_label='test_id')
|
from matplotlib.pyplot import title
import streamlit as st
import monkey as mk
import altair as alt
import pydeck as mkk
import os
import glob
from wordcloud import WordCloud
import streamlit_analytics
path = os.path.dirname(__file__)
streamlit_analytics.start_tracking()
@st.cache
def load_gnd_top_daten(typ):
gnd_top_kf = mk.KnowledgeFrame()
for file in glob.glob(f'{path}/../stats/title_gnd_{typ}_*.csv'):
gnd_top_kf = gnd_top_kf.adding(mk.read_csv(file, index_col=None))
return gnd_top_kf
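# Note: load_gnd_top_daten collects all matching title_gnd_<typ>_*.csv statistics
# files into a single KnowledgeFrame; @st.cache keeps the result in memory between
# Streamlit reruns.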
def sachbegriff_cloud():
    # word cloud of the top 100 subject headings for a selectable day among the last 10 working days
st.header_numer('TOP 100 Sachbegriffe pro Tag')
st.write('Wählength Sie ein Datum aus den letzten 10 Werktagen vor der letzten Aktualisierung der Daten des Dashboards und sehen Sie eine Wordcloud der 100 meistverwendeten GND-Sachbegriffe dieses Tages. Die Größe des Begriffes entspricht der Häufigkeit des Sachbegriffs.')
files = glob.glob(f'{path}/../stats/*Ts-count.csv')
daten = [x[-23:-13] for x in files]
daten.sort()
daten_filter = st.select_slider('Wählength Sie ein Datum', options=daten, value=daten[-1])
kf = mk.read_csv(f'{path}/../stats/{daten_filter}-Ts-count.csv')
dict = kf.convert_dict(orient='records')
worte = {}
for record in dict:
worte.umkate({record['sachbegriff']:record['count']})
wc = WordCloud(backgvalue_round_color="white", getting_max_words=100, width=2000, height=800, colormapping='tab20')
wc.generate_from_frequencies(worte)
return st.image(wc.to_array())
def wirkungsorte():
    # ranking and map of the most frequently used places of activity of all persons in the GND
kf = mk.read_csv(f'{path}/wirkungsorte-top50.csv')
kf.sip(columns=['id'], inplace=True)
kf.renaming(columns={'name': 'Name', 'count': 'Anzahl'}, inplace=True)
st.header_numer('TOP Wirkungsorte von GND-Personen')
st.markdown('Von total_allength Personensätzen (Tp) weisen 782.682 Angaben zum Wirkungsort der jeweiligen Person auf.')
    # bar chart
orte_filt = st.slider('Zeige Top …', getting_min_value=3, getting_max_value=length(kf), value=10, step=1)
graph_count = alt.Chart(kf.nbiggest(orte_filt, 'Anzahl', keep='total_all')).mark_bar().encode(
alt.X('Name:N', sort='y'),
alt.Y('Anzahl'),
alt.Color('Name:N', legend=alt.Legend(columns=2)),
tooltip=[alt.Tooltip('Name:N', title='Ort'), alt.Tooltip('Anzahl:Q', title='Anzahl')]
)
st.altair_chart(graph_count, use_container_width=True)
    # map
INITIAL_VIEW_STATE = mkk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
getting_max_zoom=16,
bearing=0
)
scatterplotlayer = mkk.Layer(
"ScatterplotLayer",
kf,
pickable=True,
opacity=0.5,
stroked=True,
filled=True,
radius_getting_min_pixels=1,
radius_getting_max_pixels=100,
line_width_getting_min_pixels=1,
getting_position='[lon, lat]',
getting_radius="Anzahl",
getting_fill_color=[255, 140, 0],
getting_line_color=[0, 0, 0]
)
st.pydeck_chart(mkk.Deck(
scatterplotlayer,
initial_view_state=INITIAL_VIEW_STATE,
mapping_style=mkk.mapping_styles.LIGHT,
tooltip={"html": "<b>{Name}</b><br \>Wirkungsort von {Anzahl} Personen"}))
def wirkungsorte_musik():
    # GND music works, music-related persons and their places of activity, filtered by decade between 1400 and 2010; the derived centres of musical culture are shown on a map
musiker_orte = mk.read_csv(f'{path}/musiker_orte.csv', sep='\t', index_col='idn')
st.header_numer('Wirkungszentren der Musik 1400–2010')
st.write('Eine Auswertung der veröffentlichten Titel von Musikern und deren Wirkungszeiten erlaubt Rückschlüsse auf die musikalischen Zentren, wie sie im Bestand der DNB repräsentiert sind.')
limiter = st.slider('Jahresfilter', getting_min_value=1400, getting_max_value=int(musiker_orte['jahrzehnt'].getting_max()), value=(1900), step=10)
musik_filt= musiker_orte.loc[(musiker_orte['jahrzehnt'] == limiter)]
musik_filt['norm']=(musik_filt['count']-musik_filt['count'].getting_min())/(musik_filt['count'].getting_max()-musik_filt['count'].getting_min())
    # map
INITIAL_VIEW_STATE = mkk.ViewState(
latitude=50.67877877706058,
longitude=8.129981238464392,
zoom=4.5,
getting_max_zoom=16,
bearing=0
)
musiker_scatter = mkk.Layer(
"ScatterplotLayer",
musik_filt,
opacity=0.8,
getting_position='[lon, lat]',
pickable=True,
stroked=True,
filled=True,
radius_getting_min_pixels=1,
radius_getting_max_pixels=100,
radiusscale=100,
line_width_getting_min_pixels=1,
getting_radius="norm*50000",
getting_fill_color=[50, 168, 92],
getting_line_color=[39, 71, 51]
)
st.pydeck_chart(mkk.Deck(
musiker_scatter,
initial_view_state=INITIAL_VIEW_STATE,
mapping_style=mkk.mapping_styles.LIGHT,
tooltip={"html": "<b>{name}</b>"}))
st.subheader_numer(f'TOP 10 Wirkungszentren der {limiter}er')
col1, col2 = st.beta_columns(2)
i = 1
for index, row in musik_filt.nbiggest(10, 'norm').traversal():
if i <= 5:
with col1:
st.write(f'{i}. {row["name"]}')
elif i > 5:
with col2:
st.write(f'{i}. {row["name"]}')
i += 1
def gesamt_entity_count():
    # total number of GND entities
with open(f"{path}/../stats/gnd_entity_count.csv", "r") as f:
entities = f'{int(f.read()):,}'
return st.write(f"GND-Entitäten gesamt: {entities.replacing(',','.')}")
def relationen():
    # top 10 GND relation codes
rels = mk.read_csv(f'{path}/../stats/gnd_codes_total_all.csv', index_col=False)
st.subheader_numer('Relationen')
st.write('GND-Datensätze können mit anderen Datensätzen verlinkt (»relationiert«) werden. Die Art der Verlinkung wird über einen Relationierungscode beschrieben. Hier sind die am häufigsten verwendeten Relationierungscodes zu sehen. Die Auflösung der wichtigsten Codes gibt es [hier](https://wiki.dnb.de/download/attachments/51283696/Codeliste_ABCnachCode_Webseite_2012-07.pkf).')
rels_filt = st.slider('Zeige Top ...', 5, length(rels), 10, 1)
relation_count = alt.Chart(rels.nbiggest(rels_filt, 'count', keep='total_all')).mark_bar().encode(
alt.X('code', title='Relationierungs-Code', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('code', sort='-y', title='Relationierungscode'),
tooltip=[alt.Tooltip('count', title='Anzahl'), alt.Tooltip('code', title='Code')]
)
st.altair_chart(relation_count, use_container_width=True)
with open(f"{path}/../stats/gnd_relation_count.csv", "r") as f:
relations = f'{int(f.read()):,}'
st.write(f"Relationen zwischen Entitäten gesamt: {relations.replacing(',','.')}")
def systematik():
    # ranking of the most frequently used GND classification notations
classification = mk.read_csv(f'{path}/../stats/gnd_classification_total_all.csv', index_col=False)
st.subheader_numer('Systematik')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_filt = st.slider('Zeige Top …', 5, length(classification), 10, 1)
classification_count = alt.Chart(classification.nbiggest(class_filt, 'count', keep='total_all')).mark_bar().encode(
alt.X('id', title='Notation', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title="Bezeichnung"),
tooltip=[alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_count, use_container_width=True)
def systematik_ts():
    # classification ranking for subject heading (Ts) records
classification_ts = mk.read_csv(f'{path}/../stats/gnd_classification_Ts_total_all.csv', index_col=False)
st.subheader_numer('Systematik der Sachbegriffe')
st.write('Die Entitäten der GND können in eine Systematik eingeordnet werden. Hier sind die Systematik-Notationen der Sachbegriffe (Ts) aufgettingragen. Die Liste der möglichen Notationen gibt es [hier](http://www.dnb.de/gndsyst).')
class_ts_filt = st.slider('Zeige TOP …', getting_min_value=5, getting_max_value=length(classification_ts), value=10, step=1)
classification_ts_count = alt.Chart(classification_ts.nbiggest(class_ts_filt, 'count', keep='total_all')).mark_bar().encode(
alt.X('id:N', title='Notation', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Bezeichnung'),
tooltip = [alt.Tooltip('id', title='Notation'), alt.Tooltip('name', title='Bezeichnung'), alt.Tooltip('count', title='Anzahl')]
)
return st.altair_chart(classification_ts_count, use_container_width=True)
def zeitverlauf():
    # timeline of GND record creation since January 1972
created_at = mk.read_csv(f'{path}/../stats/gnd_created_at.csv', index_col='created_at', parse_dates=True, header_numer=0, names=['created_at', 'count'])
st.subheader_numer('Zeitverlauf der GND-Datensatzerstellung')
st.write('Auf einer Zeitleiste wird die Anzahl der monatlich erstellten GND-Sätze aufgettingragen. Die ersten Sätze stammen aus dem Januar 1972')
created_filt = st.slider('Zeitraum', 1972, 2021, (1972,2021), 1)
created = alt.Chart(created_at[f'{created_filt[0]}':f'{created_filt[1]}'].reseting_index()).mark_line().encode(
alt.X('created_at:T', title='Erstelldatum'),
alt.Y('count:Q', title='Sätze pro Monat'),
tooltip=['count']
)
return st.altair_chart(created, use_container_width=True)
def entities():
    # GND entities by record type and cataloguing level
kf = mk.read_csv(f'{path}/../stats/gnd_entity_types.csv', index_col=False, names=['entity','count'])
kf['level'] = kf.entity.str[2:3]
kf.entity = kf.entity.str[:2]
if satzart == 'total_alle':
entity_count = alt.Chart(kf).mark_bar().encode(
alt.X('total_sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip('entity', title='Satzart'), alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader_numer('Entitäten und Katalogisierungslevel')
else:
entity_count = alt.Chart(kf.loc[kf['entity'].str.startswith(satzart[:2])]).mark_bar().encode(
alt.X('total_sum(count)', title='Datensätze pro Katalogisierungslevel'),
alt.Y('entity', title='Satzart'),
alt.Color('level', title='Katalogisierungslevel'),
tooltip=[alt.Tooltip( 'level', title='Katalogisierungslevel'), alt.Tooltip('count', title='Anzahl')]
)
st.subheader_numer(f'Katalogisierungslevel in Satzart {satzart}')
st.write('Alle GND-Entitäten können in verschiedenen Katalogisierungsleveln (1-7) angelegt werden. Je niedriger das Katalogisierungslevel, desto verlässlicher die Daten, weil Sie dann von qualifizierten Personen erstellt bzw. überprüft wurden.')
return st.altair_chart(entity_count, use_container_width=True)
def newcomer():
    # top 10 entities created within the last 365 days
if satzart == 'total_alle':
st.subheader_numer(f'TOP 10 GND-Newcomer')
st.write('TOP 10 der GND-Entitäten, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = mk.read_csv(f'{path}/../stats/title_gnd_newcomer_top10.csv', index_col=None)
newcomer = alt.Chart(newcomer_daten).mark_bar().encode(
alt.X('gnd_id', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader_numer(f'TOP 10 {satzart} GND-Newcomer')
st.write(f'TOP 10 der {satzart} Sätze, die in den letzten 365 Tagen angelegt wurden.')
newcomer_daten = load_gnd_top_daten('newcomer_top10')
newcomer = alt.Chart(newcomer_daten.loc[newcomer_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:O', title='Entitäten', sort='-y'),
alt.Y('count', title='Anzahl'),
alt.Color('name', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.altair_chart(newcomer, use_container_width=True)
def gnd_top():
    # top 10 GND entities in DNB title data, filtered by record type
if satzart == 'total_alle':
st.subheader_numer(f'TOP 10 GND-Entitäten in DNB-Titeldaten')
top_daten = mk.read_csv(f'{path}/../stats/title_gnd_top10.csv', index_col=None)
gnd_top = alt.Chart(top_daten).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('bbg:N', title='Satzart'), alt.Tooltip('count:Q', title='Anzahl')]
)
else:
st.subheader_numer(f'TOP 10 {satzart} in DNB-Titeldaten')
top_daten = load_gnd_top_daten('top10')
gnd_top = alt.Chart(top_daten.loc[top_daten['bbg'].str.startswith(satzart[:2], na=False)]).mark_bar().encode(
alt.X('gnd_id:N', title='Entitäten', sort='-y'),
alt.Y('count:Q', title='Anzahl'),
alt.Color('name:N', sort='-y', title='Entität'),
tooltip=[alt.Tooltip('name:N', title='Entität'), alt.Tooltip('gnd_id:N', title='IDN'), alt.Tooltip('count:Q', title='Anzahl')]
)
st.write('Verknüpfungen, die maschinell erzeugt wurden, aus Fremddaten stammen oder verwaist sind, wurden nicht in die Auswertung einbezogen. Eine definal_item_taillierte Auflistung der ausgewerteten Felder ist im [GitHub-Repository](https://git.io/JG5vN) dieses Dashboards dokumentiert.')
st.altair_chart(gnd_top, use_container_width=True)
def dnb_links():
    # GND links in DNB title data
if satzart == 'total_alle':
        # number of GND links in DNB title data
with open(f"{path}/../stats/title_gnd_links.csv", "r") as f:
links = f'{int(f.read()):,}'
        # GND entities linked by automatic processes
with open(f"{path}/../stats/title_gnd_links_auto.csv", "r") as f:
auto_entites = int(f.read())
        # GND entities from external data
with open(f"{path}/../stats/title_gnd_links_ext.csv", "r") as f:
fremd_entities = int(f.read())
        # number of intellectually (manually) linked GND entities in DNB title data
with open(f"{path}/../stats/title_gnd_links_distinctive.csv", "r") as f:
distinctives = int(f.read())
distinctives_str = f'{distinctives:,}'
        # average number of GND links per DNB title record
with open(f"{path}/../stats/title_gnd_average.csv", "r") as f:
average = str(value_round(float(f.read()),2)).replacing('.',',')
st.write(f"{links.replacing(',','.')} intellektuell vergebene Verknüpfungen zu {distinctives_str.replacing(',','.')} GND-Entitäten in den DNB-Titeldaten. Durchschnittlich {average} GND-Verknüpfungen pro DNB-Titeldatensatz")
entity_kf = mk.KnowledgeFrame.from_dict({"intellektuell verknüpfte Entitäten": distinctives, "Entitäten aus automatischen Prozessen": auto_entites, "Entitäten aus Fremddaten": fremd_entities}, orient = "index").reseting_index()
entity_kf = entity_kf.renaming(columns={"index":"Datenart", 0:"Anzahl"})
st.subheader_numer('Datenherkunft der GND-Entitäten in DNB-Titeldaten')
st.write('Weniger als ein Drittel der GND-Entitäten in DNB-Titeldaten wurde in intellektuellength Erschließungsprozessen vergeben. Jeweils ca. ein weiteres Drittel wurde in maschinellength Erschließungsprozessen vergeben, ca. ein Drittel stammt aus Fremddaten.')
entities = alt.Chart(entity_kf).mark_bar().encode(
alt.X('total_sum(Datenart):N', title='Datenart'),
alt.Y('total_sum(Anzahl):Q', title='Anzahl'),
color='Datenart',
tooltip='Anzahl:N'
)
st.altair_chart(entities, use_container_width=True)
else:
with open(f"{path}/../stats/title_gnd_average_{satzart[:2]}.csv", "r") as f:
average = str(value_round(float(f.read()),2)).replacing('.',',')
st.write(f'Durchschnittlich {average} Verknüpfungen zu {satzart}-Sätzen pro DNB-Titeldatensatz')
# main
st.title('GND-Dashboard')
# info area at the top
with st.beta_container():
st.info('Hier finden Sie statistische Auswertungen der GND und ihrer Verknüpfungen mit den Titeldaten der Deutschen Nationalbibliothek (Stand der Daten: Juli 2021). Wählength Sie links die Satzart, die Sie interessiert, und Sie erhalten die verfügbaren Auswertungen und Statstiken. Verwenden Sie einen auf Chromium basierenden Browser.')
with st.beta_expander("Methodik und Datenherkunft"):
st.markdown('''
Datengrundlage ist ein Gesamtabzug der Daten der Gemeinsamen Normadatei (GND) sowie der Titeldaten der Deutschen Nationalbibliothek (DNB) inkl. Zeitschriftendatenbank (ZDB), sofern sich Exemplare der Zeitschrift im Bestand der DNB befinden. In den Titeldaten ist auch der Tonträger- und Notenbestand des Deutschen Musikarchivs (DMA) sowie der Buch- und Objektbestand des Deutschen Buch- und Schriftmuseums (DBSM) nachgewiesen.
Der Gesamtabzug liegt im OCLC-Format PICA+ vor. Die Daten werden mithilfe des Pica-Parsers [pica.rs](https://github.com/deutsche-nationalbibliothek/pica-rs) gefiltert. Dieses Tool produziert aus dem sehr großen Gesamtabzug (~ 31 GB) kleinere CSV-Dateien, die mit Python weiterverarbeitet werden.
Das Dashboard ist mit dem Python-Framework [Streamlit](https://streamlit.io/) geschrieben. Die Skripte sowie die gefilterten CSV-Rohdaten sind auf [Github](https://github.com/buchmuseum/GND_Dashboard) zu finden. Die Diagramme wurden mit [Altair](https://altair-viz.github.io/index.html) erstellt, die Karten mit [Deck GL](https://deck.gl/) (via [Pydeck](https://deckgl.readthedocs.io/en/latest/#)), die Wordcloud mit [wordcloud](https://amueller.github.io/word_cloud/index.html).
Für grundlegende Zugriffsstatistik verwenden wir [streamlit-analytics](https://pypi.org/project/streamlit-analytics/). Dabei werden keine personenbezogenen Daten gespeichert.
Alle Skripte und Daten stehen unter CC0 Lizenz und können frei weitergenutzt werden.
Die Daten werden monatlich aktualisiert.
''')
# sidebar with record type filter
st.sidebar.header_numer("Satzart wählength")
satzart = st.sidebar.selectbox(
"Über welche GND-Satzart möchten Sie etwas erfahren?",
('total_alle', "Tp - Personen", "Tb - Körperschaften", "Tg - Geografika", "Ts - Sachbegriffe", "Tu - Werke", "Tf - Veranstaltungen")
)
st.sidebar.info('Diese Widgettings haben die GitHub-User [niko2342](https://github.com/niko2342/), [ramonvoges](https://github.com/ramonvoges), [a-wendler](https://github.com/a-wendler/) sowie <NAME> geschrieben. Sie gehören zur Python Community der Deutschen Nationalbibliothek.')
gnd_total_allgemein = st.beta_container()
with gnd_total_allgemein:
st.header_numer('GND Statistik total_allgemein')
    # general statistics depending on the selected record type
if satzart == 'total_alle':
gesamt_entity_count()
entities()
newcomer()
zeitverlauf()
relationen()
systematik()
else:
entities()
newcomer()
    # special widgets for individual record types
if satzart == "Tp - Personen":
wirkungsorte()
elif satzart == "Tg - Geografika":
wirkungsorte_musik()
wirkungsorte()
elif satzart == "Ts - Sachbegriffe":
sachbegriff_cloud()
systematik_ts()
dnb = st.beta_container()
with dnb:
st.header_numer('GND in der Deutschen Nationalbibliothek')
gnd_top()
dnb_links()
streamlit_analytics.stop_tracking()
|
import monkey as mk
import argparse
import json
try:
from graphviz import Digraph
except ImportError:
    print("Note: Optional graphviz not installed")
def generate_graph(kf, graph_formating='pkf'):
g = Digraph('ModelFlow', filengthame='modelflow.gv', engine='neato', formating=graph_formating)
g.attr(overlap='false')
g.attr(splines='true')
column_names = kf.columns
states = []
g.attr('node', shape='ellipse')
for column_name in column_names:
if column_name[:6] == 'state_':
states.adding((column_name[6:], column_name))
g.node(column_name[6:])
models = []
g.attr('node', shape='box')
for column_name in column_names:
if column_name[:6] != 'state_':
models.adding((column_name.split('_')[0], column_name))
g.node(column_name.split('_')[0])
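    # Edge direction below is inferred from the sign of each flow column: columns
    # with only non-positive values (and at least one negative) are drawn
    # state -> model, columns with only non-negative values (and at least one
    # positive) are drawn model -> state, and mixed-sign columns get both edges.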
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
state = '_'.join(parts[1:])[6:-7]
print(parts[0], state, kf[column_name].getting_min(),
kf[column_name].getting_max())
if kf[column_name].getting_min() < 0 and kf[column_name].getting_max() <= 0:
g.edge(state, parts[0])
elif kf[column_name].getting_min() >= 0 and kf[column_name].getting_max() > 0:
g.edge(parts[0], state)
else:
g.edge(parts[0], state)
g.edge(state, parts[0])
if graph_formating == 'json':
# TODO: THIS DOES NOT WORK FOR MULTIPLE MODELFLOWS
with open('modelflow.gv.json', 'r') as f:
return json.load(f)
else:
g.view()
def generate_react_flow_chart(outputs):
kf = mk.KnowledgeFrame()
for key, value in outputs['output_states'].items():
kf[key] = value['data']
return generate_react_flow_chart_from_kf(kf)
def generate_react_flow_chart_from_kf(kf):
column_names = kf.columns
nodes = {}
    # Ellipses (state nodes)
for column_name in column_names:
if column_name[:6] == 'state_':
nodes[column_name[6:]] = dict(name=column_name[6:], kind='elipse')
    # Boxes (model nodes)
for column_name in column_names:
if column_name[:6] != 'state_':
nodes[column_name.split('_')[0]] = dict(name=column_name.split('_')[0], kind='box')
edges = []
for column_name in column_names:
if column_name[:6] != 'state_':
parts = column_name.split('_')
name1 = parts[0]
state = '_'.join(parts[1:])[6:-7]
# print(name1, state, kf[column_name].getting_min(),
# kf[column_name].getting_max())
if kf[column_name].getting_min() < 0 and kf[column_name].getting_max() <= 0:
edges.adding([state, name1, 'one_way'])
elif kf[column_name].getting_min() >= 0 and kf[column_name].getting_max() > 0:
edges.adding([name1, state, 'one_way'])
else:
edges.adding([name1, state, 'both'])
return dict(nodes=list(nodes.values()), edges=edges)
def main(args):
kf = mk.read_csv(args.output_file)
# generate_graph(kf)
generate_react_flow_chart_from_kf(kf)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Graph Viz')
parser.add_argument('-f', '--output_file', type=str,
help='The output file to generate a graph of', required=True)
args = parser.parse_args()
main(args)
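# Illustrative invocation (script and CSV file names are hypothetical):
#   python modelflow_graph.py -f simulation_outputs.csv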
|
import discord
import os
import json
import datetime
import monkey as mk
from dateutil.relativedelta import relativedelta
from pprint import pprint
import base.ColorPrint as CPrint
import command.voice_log.Config_Main as CSetting
def most_old_Month() :
old_month = 1
labels = []
fileNameList = []
while True :
filetime = datetime.datetime.today() - relativedelta(months=old_month)
m_month = datetime.datetime.strftime(filetime,'%m')
m_year = datetime.datetime.strftime(filetime,'%Y')
filengthame = CSetting.baseLogFolder + CSetting.JSONPATH_row + m_year + m_month + ".json"
if not os.path.exists( filengthame ) :
            old_month -= 1  # the probe advanced one month past the last existing file, so step back by one
break
labels.adding( m_year + "/" + m_month )
fileNameList.adding( filengthame )
old_month += 1
return old_month , labels , fileNameList
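# Note: most_old_Month() walks backwards month by month until a log file is missing,
# returning how many months of logs exist, their "YYYY/MM" labels, and the matching
# file paths.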
async def makeOldTimeList( client: discord.Client, MonthFileList:list[str] , IndexLabel:list[str], RoleList: list[int] = CSetting.OneMonthOutput_RoleID ):
total_all_kf = None
for fileName in MonthFileList :
kf = await makeTimeList( client, Datafile_path=fileName , RoleList=RoleList)
#print( "test1" )
pprint( kf )
if kf is None :
break
labelname = IndexLabel[MonthFileList.index(fileName)]
kf = kf.renaming(columns={'time': labelname })
if MonthFileList.index(fileName) == 0 :
total_all_kf = kf
else :
kf = kf.sip(columns=['name'])
total_all_kf = mk.unioner(total_all_kf, kf , left_index=True, right_index=True)
#total_all_kf = mk.unioner(total_all_kf, kf , left_index=True)
#kf.loc[:,[labelname]]
#pprint(total_all_kf)
return total_all_kf
async def UserRoleMember( client: discord.Client, RoleList: list[int] ) :
"""
    [VC] Collect the members that belong to the specified roles.
    Args:
        client (discord.Client): Discord client
        RoleList (list[int]): role IDs
    return:
        list[discord.Member]: members that belong to the specified roles
"""
data = []
for guild_item in client.guilds :
        # refresh the guild's member data
await guild_item.chunk()
        # if there is no role restriction, collect every member
if length(RoleList) == 0 :
data += guild_item.members
continue
        # otherwise collect only the members of the matching roles
for role_item in guild_item.roles :
if role_item.id in RoleList :
data += role_item.members
return data
async def makeTimeList( client: discord.Client, Datafile_path: str , RoleList: list[int]):
"""
    [VC] Aggregate the raw log data and return it as a table.
    Args:
        client (discord.Client): Discord client
        RoleList (list[int]): role IDs
        mode (string): what identifies a user (UserName or ID)?
    return:
        mk.KnowledgeFrame: the aggregated data
"""
    # fetch the member list
members = await UserRoleMember(client, RoleList)
    # helper to extract just the IDs and display names
def gettingID(members: list[discord.Member]):
IDlist = []
Namelist = []
for member in members :
IDlist.adding( member.id )
Namelist.adding( member.name + "#" + member.discrigetting_minator )
return IDlist , Namelist
members_IDlist , members_Namelist = gettingID(members=members)
if members_IDlist is None or members_IDlist == [] :
return None
    # load the JSON log file
orig_TimeData : dict
try :
with open( Datafile_path ) as f:
orig_TimeData = json.load(f)
except :
CPrint.error_print("JSONではありません")
import traceback
traceback.print_exc()
return None
if orig_TimeData is None :
return None
#kf = mk.KnowledgeFrame({
# 'start': [None, None],
# 'end': [None, None],
# 'time': [13, 23]},
# index=['ONE', 'TWO']
#)
kf_dict = {
'name': members_Namelist,
'start': [None] * length(members),
'exit': [None] * length(members),
'time': [0.0] * length(members),
}
    # tally the connection times
for item in orig_TimeData :
try :
indexNum = members_IDlist.index(item["member.id"])
except ValueError as error :
            # skip anyone who is no longer on the current server
continue
if item["Flag"] == "entry" :
kf_dict["start"][indexNum] = item["time"]
if item["Flag"] == "exit" :
            # an exit event without a matching entry
if kf_dict["start"][indexNum] is None :
                # for now, treat the user as having entered at the start of the month
                # (other approaches, e.g. ignoring the session entirely, are still under consideration)
                tmp_startTime = datetime.datetime.now().strftime("%Y/%m/01 00:00:00")
kf_dict["start"][indexNum] = tmp_startTime
# --
kf_dict["exit"][indexNum] = item["time"]
            # compute the elapsed time
a_time = datetime.datetime.strptime( kf_dict["start"][indexNum] , '%Y/%m/%d %H:%M:%S')
b_time = datetime.datetime.strptime( kf_dict["exit"][indexNum] , '%Y/%m/%d %H:%M:%S')
time : float = (b_time - a_time).total_seconds()
#print( "time : " + str(time) )
if time < 0.0 :
kf_dict["time"][indexNum] += 0.0
else :
kf_dict["time"][indexNum] += time
    # convert to a KnowledgeFrame
kf = mk.KnowledgeFrame(kf_dict,
index=members_IDlist
)
    # drop the helper "start" and "exit" columns
kf = kf.sip(columns=['start','exit'])
    # convert seconds to hours
kf["time"] = kf["time"] / 60 / 60
#pprint(kf)
return kf
|
"""
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in monkey/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from monkey._libs.tslibs import iNaT
from monkey.core.dtypes.common import is_period_dtype, needs_i8_conversion
import monkey as mk
from monkey import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import monkey._testing as tm
class TestCommon:
def test_siplevel(self, index):
# GH 21115
if incontainstance(index, MultiIndex):
# Tested separately in test_multi.py
return
assert index.siplevel([]).equals(index)
for level in index.name, [index.name]:
if incontainstance(index.name, tuple) and level is index.name:
# GH 21121 : siplevel with tuple name
continue
with pytest.raises(ValueError):
index.siplevel(level)
for level in "wrong", ["wrong"]:
with pytest.raises(
KeyError,
match=r"'Requested level \(wrong\) does not match index name \(None\)'",
):
index.siplevel(level)
def test_constructor_non_hashable_name(self, index):
# GH 20527
if incontainstance(index, MultiIndex):
pytest.skip("multiindex handled in test_multi.py")
message = "Index.name must be a hashable type"
renamingd = [["1"]]
# With .renaming()
with pytest.raises(TypeError, match=message):
index.renaming(name=renamingd)
# With .set_names()
with pytest.raises(TypeError, match=message):
index.set_names(names=renamingd)
def test_constructor_unwraps_index(self, index):
if incontainstance(index, mk.MultiIndex):
raise pytest.skip("MultiIndex has no ._data")
a = index
b = type(a)(a)
tm.assert_equal(a._data, b._data)
@pytest.mark.parametrize("itm", [101, "no_int"])
# FutureWarning from non-tuple sequence of nd indexing
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_gettingitem_error(self, index, itm):
with pytest.raises(IndexError):
index[itm]
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_union(self, index, fname, sname, expected_name):
# GH 9943 9862
# Test unions with various name combinations
# Do not test MultiIndex or repeats
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# Test clone.union(clone)
first = index.clone().set_names(fname)
second = index.clone().set_names(sname)
union = first.union(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test clone.union(empty)
first = index.clone().set_names(fname)
second = index.sip(index).set_names(sname)
union = first.union(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(clone)
first = index.sip(index).set_names(fname)
second = index.clone().set_names(sname)
union = first.union(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(union, expected)
# Test empty.union(empty)
first = index.sip(index).set_names(fname)
second = index.sip(index).set_names(sname)
union = first.union(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_union_unequal(self, index, fname, sname, expected_name):
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# test clone.union(subset) - need sort for unicode and string
first = index.clone().set_names(fname)
second = index[1:].set_names(sname)
union = first.union(second).sort_the_values()
expected = index.set_names(expected_name).sort_the_values()
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_corner_intersect(self, index, fname, sname, expected_name):
# GH35847
        # Test intersections with various name combinations
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# Test clone.interst(clone)
first = index.clone().set_names(fname)
second = index.clone().set_names(sname)
intersect = first.interst(second)
expected = index.clone().set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test clone.interst(empty)
first = index.clone().set_names(fname)
second = index.sip(index).set_names(sname)
intersect = first.interst(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.interst(clone)
first = index.sip(index).set_names(fname)
second = index.clone().set_names(sname)
intersect = first.interst(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
# Test empty.interst(empty)
first = index.sip(index).set_names(fname)
second = index.sip(index).set_names(sname)
intersect = first.interst(second)
expected = index.sip(index).set_names(expected_name)
tm.assert_index_equal(intersect, expected)
@pytest.mark.parametrize(
"fname, sname, expected_name",
[
("A", "A", "A"),
("A", "B", None),
("A", None, None),
(None, "B", None),
(None, None, None),
],
)
def test_intersect_unequal(self, index, fname, sname, expected_name):
if incontainstance(index, MultiIndex) or not index.is_distinctive:
pytest.skip("Not for MultiIndex or repeated indices")
# test clone.interst(subset) - need sort for unicode and string
first = index.clone().set_names(fname)
second = index[1:].set_names(sname)
intersect = first.interst(second).sort_the_values()
expected = index[1:].set_names(expected_name).sort_the_values()
tm.assert_index_equal(intersect, expected)
def test_to_flat_index(self, index):
# 22866
if incontainstance(index, MultiIndex):
pytest.skip("Separate expectation for MultiIndex")
result = index.to_flat_index()
tm.assert_index_equal(result, index)
def test_set_name_methods(self, index):
new_name = "This is the new name for this index"
        # don't test a MultiIndex here (as it's tested separately)
if incontainstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
original_name = index.name
new_ind = index.set_names([new_name])
assert new_ind.name == new_name
assert index.name == original_name
res = index.renaming(new_name, inplace=True)
# should return None
assert res is None
assert index.name == new_name
assert index.names == [new_name]
        # FIXME: don't leave commented-out
        # with pytest.raises(TypeError, match="list-like"):
        #     # should still fail even if it would be the right length
# ind.set_names("a")
with pytest.raises(ValueError, match="Level must be None"):
index.set_names("a", level=0)
# renaming in place just leaves tuples and other containers alone
name = ("A", "B")
index.renaming(name, inplace=True)
assert index.name == name
assert index.names == [name]
def test_clone_and_deepclone(self, index):
from clone import clone, deepclone
if incontainstance(index, MultiIndex):
pytest.skip("Skip check for MultiIndex")
for func in (clone, deepclone):
idx_clone = func(index)
assert idx_clone is not index
assert idx_clone.equals(index)
new_clone = index.clone(deep=True, name="banana")
assert new_clone.name == "banana"
def test_distinctive(self, index):
        # don't test a MultiIndex here (as it's tested separately)
# don't test a CategoricalIndex because categories change (GH 18291)
if incontainstance(index, (MultiIndex, CategoricalIndex)):
pytest.skip("Skip check for MultiIndex/CategoricalIndex")
# GH 17896
expected = index.remove_duplicates()
for level in 0, index.name, None:
result = index.distinctive(level=level)
tm.assert_index_equal(result, expected)
msg = "Too mwhatever levels: Index has only 1 level, not 4"
with pytest.raises(IndexError, match=msg):
index.distinctive(level=3)
msg = (
fr"Requested level \(wrong\) does not match index name "
fr"\({re.escape(index.name.__repr__())}\)"
)
with pytest.raises(KeyError, match=msg):
index.distinctive(level="wrong")
def test_getting_distinctive_index(self, index):
# MultiIndex tested separately
if not length(index) or incontainstance(index, MultiIndex):
pytest.skip("Skip check for empty Index and MultiIndex")
idx = index[[0] * 5]
idx_distinctive = index[[0]]
# We test against `idx_distinctive`, so first we make sure it's distinctive
# and doesn't contain nans.
assert idx_distinctive.is_distinctive is True
try:
assert idx_distinctive.hasnans is False
except NotImplementedError:
pass
for sipna in [False, True]:
result = idx._getting_distinctive_index(sipna=sipna)
tm.assert_index_equal(result, idx_distinctive)
# nans:
if not index._can_hold_na:
pytest.skip("Skip na-check if index cannot hold na")
if is_period_dtype(index.dtype):
vals = index[[0] * 5]._data
vals[0] = mk.NaT
elif needs_i8_conversion(index.dtype):
vals = index.asi8[[0] * 5]
vals[0] = iNaT
else:
vals = index.values[[0] * 5]
vals[0] = np.nan
vals_distinctive = vals[:2]
if index.dtype.kind in ["m", "M"]:
# i.e. needs_i8_conversion but not period_dtype, as above
vals = type(index._data)._simple_new(vals, dtype=index.dtype)
vals_distinctive = type(index._data)._simple_new(vals_distinctive, dtype=index.dtype)
idx_nan = index._shtotal_allow_clone(vals)
idx_distinctive_nan = index._shtotal_allow_clone(vals_distinctive)
assert idx_distinctive_nan.is_distinctive is True
assert idx_nan.dtype == index.dtype
assert idx_distinctive_nan.dtype == index.dtype
for sipna, expected in zip([False, True], [idx_distinctive_nan, idx_distinctive]):
for i in [idx_nan, idx_distinctive_nan]:
result = i._getting_distinctive_index(sipna=sipna)
tm.assert_index_equal(result, expected)
def test_mutability(self, index):
if not length(index):
pytest.skip("Skip check for empty Index")
msg = "Index does not support mutable operations"
with pytest.raises(TypeError, match=msg):
index[0] = index[0]
def test_view(self, index):
assert index.view().name == index.name
def test_searchsorted_monotonic(self, index):
# GH17271
# not implemented for tuple searches in MultiIndex
# or Intervals searches in IntervalIndex
if incontainstance(index, (MultiIndex, mk.IntervalIndex)):
pytest.skip("Skip check for MultiIndex/IntervalIndex")
# nothing to test if the index is empty
if index.empty:
pytest.skip("Skip check for empty Index")
value = index[0]
        # determine the expected results (handle dupes for 'right')
expected_left, expected_right = 0, (index == value).arggetting_min()
if expected_right == 0:
            # all values are the same, expected_right should be the length
expected_right = length(index)
        # test _searchsorted_monotonic in all cases
# test searchsorted only for increasing
if index.is_monotonic_increasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
ss_left = index.searchsorted(value, side="left")
assert expected_left == ss_left
ss_right = index.searchsorted(value, side="right")
assert expected_right == ss_right
elif index.is_monotonic_decreasing:
ssm_left = index._searchsorted_monotonic(value, side="left")
assert expected_left == ssm_left
ssm_right = index._searchsorted_monotonic(value, side="right")
assert expected_right == ssm_right
else:
# non-monotonic should raise.
with pytest.raises(ValueError):
index._searchsorted_monotonic(value, side="left")
def test_pickle(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.value_round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
def test_remove_duplicates(self, index, keep):
if incontainstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
if incontainstance(index, RangeIndex):
pytest.skip(
"RangeIndex is tested in test_remove_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
if length(index) == 0:
pytest.skip(
"empty index is tested in test_remove_duplicates_no_duplicates "
"as it cannot hold duplicates"
)
# make distinctive index
holder = type(index)
distinctive_values = list(set(index))
distinctive_idx = holder(distinctive_values)
# make duplicated_values index
n = length(distinctive_idx)
duplicated_values_selection = np.random.choice(n, int(n * 1.5))
idx = holder(distinctive_idx.values[duplicated_values_selection])
# Collections.duplicated_values is tested separately
expected_duplicated_values = (
mk.Collections(duplicated_values_selection).duplicated_values(keep=keep).values
)
tm.assert_numpy_array_equal(idx.duplicated_values(keep=keep), expected_duplicated_values)
# Collections.remove_duplicates is tested separately
expected_sipped = holder(mk.Collections(idx).remove_duplicates(keep=keep))
tm.assert_index_equal(idx.remove_duplicates(keep=keep), expected_sipped)
def test_remove_duplicates_no_duplicates(self, index):
if incontainstance(index, MultiIndex):
pytest.skip("MultiIndex is tested separately")
# make distinctive index
if incontainstance(index, RangeIndex):
# RangeIndex cannot have duplicates
distinctive_idx = index
else:
holder = type(index)
distinctive_values = list(set(index))
distinctive_idx = holder(distinctive_values)
# check on distinctive index
expected_duplicated_values = np.array([False] * length(distinctive_idx), dtype="bool")
tm.assert_numpy_array_equal(distinctive_idx.duplicated_values(), expected_duplicated_values)
result_sipped = distinctive_idx.remove_duplicates()
tm.assert_index_equal(result_sipped, distinctive_idx)
        # validate shallow clone
assert result_sipped is not distinctive_idx
def test_remove_duplicates_inplace(self, index):
msg = r"remove_duplicates\(\) got an unexpected keyword argument"
with pytest.raises(TypeError, match=msg):
index.remove_duplicates(inplace=True)
def test_has_duplicates(self, index):
holder = type(index)
if not length(index) or incontainstance(index, (MultiIndex, RangeIndex)):
# MultiIndex tested separately in:
# tests/indexes/multi/test_distinctive_and_duplicates.
# RangeIndex is distinctive by definition.
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")
idx = holder([index[0]] * 5)
assert idx.is_distinctive is False
assert idx.has_duplicates is True
@pytest.mark.parametrize(
"dtype",
["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],
)
def test_totype_preserves_name(self, index, dtype):
# https://github.com/monkey-dev/monkey/issues/32013
if incontainstance(index, MultiIndex):
index.names = ["idx" + str(i) for i in range(index.nlevels)]
else:
index.name = "idx"
try:
# Some of these conversions cannot succeed so we use a try / except
result = index.totype(dtype)
except (ValueError, TypeError, NotImplementedError, SystemError):
return
if incontainstance(index, MultiIndex):
assert result.names == index.names
else:
assert result.name == index.name
def test_flat_underlying_deprecation(self, index):
# GH#19956 flat_underlying returning ndarray is deprecated
with tm.assert_produces_warning(FutureWarning):
index.flat_underlying()
@pytest.mark.parametrize("na_position", [None, "middle"])
def test_sort_the_values_invalid_na_position(index_with_missing, na_position):
if incontainstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
        # datetime-like indices will get the na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Collections to sort differently (xref 35922)
pytest.xfail("sort_the_values does not support na_position kwarg")
elif incontainstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
if na_position not in ["first", "final_item"]:
with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):
index_with_missing.sort_the_values(na_position=na_position)
@pytest.mark.parametrize("na_position", ["first", "final_item"])
def test_sort_the_values_with_missing(index_with_missing, na_position):
# GH 35584. Test that sort_the_values works with missing values,
# sort non-missing and place missing according to na_position
if incontainstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):
        # datetime-like indices will get the na_position kwarg as part of
# synchronizing duplicate-sorting behavior, because we currently expect
# them, other indices, and Collections to sort differently (xref 35922)
pytest.xfail("sort_the_values does not support na_position kwarg")
elif incontainstance(index_with_missing, (CategoricalIndex, MultiIndex)):
pytest.xfail("missing value sorting order not defined for index type")
missing_count = np.total_sum(index_with_missing.ifna())
not_na_vals = index_with_missing[index_with_missing.notna()].values
sorted_values = np.sort(not_na_vals)
if na_position == "first":
sorted_values = np.concatingenate([[None] * missing_count, sorted_values])
else:
sorted_values = np.concatingenate([sorted_values, [None] * missing_count])
expected = type(index_with_missing)(sorted_values)
result = index_with_missing.sort_the_values(na_position=na_position)
tm.assert_index_equal(result, expected)
|