import platform
import textwrap
import unittest
from conans.test.utils.conan_v2_tests import ConanV2ModeTestCase
from conans.test.utils.tools import TestClient
class ConanfileSourceTestCase(ConanV2ModeTestCase):
""" Conan v2: 'self.cpp_info' is not available in 'package_id()' """
def test_cppinfo_not_in_package_id(self):
# self.cpp_info is not available in 'package_id'
t = self.get_client()
conanfile = textwrap.dedent("""
from conans import ConanFile
class Recipe(ConanFile):
def package_id(self):
self.cpp_info.libs = ["A"]
""")
t.save({'conanfile.py': conanfile})
t.run('create . name/version@ -s os=Linux', assert_error=True)
self.assertIn("Conan v2 incompatible: 'self.cpp_info' access in package_id() method is deprecated", t.out)
class ConanfilePackageIdV1TestCase(unittest.TestCase):
""" Conan v1 will show a warning """
def test_v1_warning(self):
t = TestClient()
conanfile = textwrap.dedent("""
from conans import ConanFile
class Recipe(ConanFile):
def package_id(self):
self.cpp_info.libs = ["A"] # No sense, it will warn the user
""")
t.save({'conanfile.py': conanfile})
t.run('create . name/version@', assert_error=True) # It is already raising
self.assertIn("AttributeError: 'NoneType' object has no attribute 'libs'", t.out)
# self.assertIn("name/version: WARN: 'self.info' access in package() method is deprecated", t.out)
|
from test_ruby import TestRuby
|
""" Random comments:
- if song wasn't found it will return 404
- link should be lower-cased
- all spaces and punctuation characters should be deleted
"""
from prismriver.plugin.common import Plugin
from prismriver.struct import Song
class AZLyricsPlugin(Plugin):
ID = 'azlyrics'
def __init__(self, config):
super(AZLyricsPlugin, self).__init__('AZLyrics', config)
def search_song(self, artist, title):
to_delete = [' ', ',', '.', '-', '?', '!', '/', '&', "'", '(', ')']
link = 'http://www.azlyrics.com/lyrics/{}/{}.html'.format(
self.prepare_url_parameter(artist, to_delete),
self.prepare_url_parameter(title, to_delete)).lower()
page = self.download_webpage(link)
if page:
soup = self.prepare_soup(page)
head_pane = soup.find('div', {'class': 'lyricsh'})
song_artist = head_pane.find('b').text.replace(' LYRICS', '')
main_pane = soup.find('div', {'class': 'col-xs-12 col-lg-8 text-center'})
song_title = main_pane.find('b', recursive=False).text[1:-1]
lyric_pane = main_pane.find('div', {'class': None}, recursive=False)
lyric = self.parse_verse_block(lyric_pane)
return Song(song_artist, song_title, self.sanitize_lyrics([lyric]))
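# Illustrative sketch only (not part of the plugin): the rules in the module
# docstring boil down to lower-casing the artist/title and deleting spaces and
# punctuation before building the URL. The artist/title below are arbitrary
# examples.
if __name__ == '__main__':
    def _normalize(text, to_delete=(' ', ',', '.', '-', '?', '!', '/', '&', "'", '(', ')')):
        for ch in to_delete:
            text = text.replace(ch, '')
        return text.lower()

    artist, title = "Guns N' Roses", "Sweet Child O' Mine"
    print('http://www.azlyrics.com/lyrics/{}/{}.html'.format(_normalize(artist), _normalize(title)))
    # -> http://www.azlyrics.com/lyrics/gunsnroses/sweetchildomine.html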
|
#!/usr/bin/env python
"""
Performs setup of dotfiles and some system options.
See '--help' for usage.
"""
import os
import click
import clckwrkbdgr.jobsequence as JB
setup = JB.JobSequence(
verbose_var_name='DOTFILES_SETUP_VERBOSE',
default_job_dir=[
os.path.join(os.environ['XDG_CONFIG_HOME'], 'setup.d'),
os.path.join(os.environ['XDG_DATA_HOME'], 'setup.d'),
os.path.join(os.environ['HOME'], '.local', 'setup.d'),
],
click=click,
)
if __name__ == '__main__':
setup.cli()
|
"""Module with additional types used by the index"""
from binascii import b2a_hex
from typing import TYPE_CHECKING, NamedTuple, Sequence, Tuple, Union, cast
from git.objects import Blob
from git.types import PathLike
from .util import pack, unpack
# typing ----------------------------------------------------------------------
if TYPE_CHECKING:
from git.repo import Repo
# ---------------------------------------------------------------------------------
__all__ = ("BlobFilter", "BaseIndexEntry", "IndexEntry")
# { Invariants
CE_NAMEMASK = 0x0FFF
CE_STAGEMASK = 0x3000
CE_EXTENDED = 0x4000
CE_VALID = 0x8000
CE_STAGESHIFT = 12
# } END invariants
class BlobFilter(object):
"""
Predicate to be used by iter_blobs, allowing it to return only blobs which
match the given list of directories or files.
The given paths are relative to the repository.
"""
__slots__ = "paths"
def __init__(self, paths: Sequence[PathLike]) -> None:
"""
:param paths:
tuple or list of paths which are either pointing to directories or
to files relative to the current repository
"""
self.paths = paths
def __call__(self, stage_blob: Blob) -> bool:
path = stage_blob[1].path
for p in self.paths:
if path.startswith(p):
return True
# END for each path in filter paths
return False
class BaseIndexEntryHelper(NamedTuple):
"""Typed namedtuple to provide named attribute access for BaseIndexEntry.
Needed to allow overriding __new__ in child class to preserve backwards compat."""
mode: int
binsha: bytes
flags: int
path: PathLike
ctime_bytes: bytes = pack(">LL", 0, 0)
mtime_bytes: bytes = pack(">LL", 0, 0)
dev: int = 0
inode: int = 0
uid: int = 0
gid: int = 0
size: int = 0
class BaseIndexEntry(BaseIndexEntryHelper):
"""Small Brother of an index entry which can be created to describe changes
done to the index in which case plenty of additional information is not required.
As the first 4 data members match exactly to the IndexEntry type, methods
expecting a BaseIndexEntry can also handle full IndexEntries even if they
use numeric indices for performance reasons.
"""
def __new__(
cls,
inp_tuple: Union[
Tuple[int, bytes, int, PathLike],
Tuple[int, bytes, int, PathLike, bytes, bytes, int, int, int, int, int],
],
) -> "BaseIndexEntry":
"""Override __new__ to allow construction from a tuple for backwards compatibility"""
return super().__new__(cls, *inp_tuple)
def __str__(self) -> str:
return "%o %s %i\t%s" % (self.mode, self.hexsha, self.stage, self.path)
def __repr__(self) -> str:
return "(%o, %s, %i, %s)" % (self.mode, self.hexsha, self.stage, self.path)
@property
def hexsha(self) -> str:
"""hex version of our sha"""
return b2a_hex(self.binsha).decode("ascii")
@property
def stage(self) -> int:
"""Stage of the entry, either:
* 0 = default stage
* 1 = stage before a merge or common ancestor entry in case of a 3 way merge
* 2 = stage of entries from the 'left' side of the merge
* 3 = stage of entries from the right side of the merge
:note: For more information, see http://www.kernel.org/pub/software/scm/git/docs/git-read-tree.html
"""
return (self.flags & CE_STAGEMASK) >> CE_STAGESHIFT
@classmethod
def from_blob(cls, blob: Blob, stage: int = 0) -> "BaseIndexEntry":
""":return: Fully equipped BaseIndexEntry at the given stage"""
return cls((blob.mode, blob.binsha, stage << CE_STAGESHIFT, blob.path))
def to_blob(self, repo: "Repo") -> Blob:
""":return: Blob using the information of this index entry"""
return Blob(repo, self.binsha, self.mode, self.path)
class IndexEntry(BaseIndexEntry):
"""Allows convenient access to IndexEntry data without completely unpacking it.
Attributes usually accessed often are cached in the tuple whereas others are
unpacked on demand.
See the properties for a mapping between names and tuple indices."""
@property
def ctime(self) -> Tuple[int, int]:
"""
:return:
Tuple(int_time_seconds_since_epoch, int_nano_seconds) of the
file's creation time"""
return cast(Tuple[int, int], unpack(">LL", self.ctime_bytes))
@property
def mtime(self) -> Tuple[int, int]:
"""See ctime property, but returns modification time"""
return cast(Tuple[int, int], unpack(">LL", self.mtime_bytes))
@classmethod
def from_base(cls, base: "BaseIndexEntry") -> "IndexEntry":
"""
:return:
Minimal entry as created from the given BaseIndexEntry instance.
Missing values will be set to null-like values
:param base: Instance of type BaseIndexEntry"""
time = pack(">LL", 0, 0)
return IndexEntry(
(base.mode, base.binsha, base.flags, base.path, time, time, 0, 0, 0, 0, 0)
)
@classmethod
def from_blob(cls, blob: Blob, stage: int = 0) -> "IndexEntry":
""":return: Minimal entry resembling the given blob object"""
time = pack(">LL", 0, 0)
return IndexEntry(
(
blob.mode,
blob.binsha,
stage << CE_STAGESHIFT,
blob.path,
time,
time,
0,
0,
0,
0,
blob.size,
)
)
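if __name__ == "__main__":
    # Hedged usage sketch, not part of the module: assumes GitPython is installed
    # and that the current working directory is a git repository.
    from git import Repo

    repo = Repo(".")
    # IndexFile.iter_blobs yields (stage, Blob) tuples and accepts a predicate,
    # which is exactly what BlobFilter implements; here we keep paths under "git".
    for stage, blob in repo.index.iter_blobs(BlobFilter(["git"])):
        entry = IndexEntry.from_blob(blob)  # minimal stage-0 entry for the blob
        print(entry.hexsha, entry.stage, entry.path)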
|
'''
Simulate the process of fitting a polynomial function
'''
import tensorflow as tf
import tensorflow.keras as ks
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
# Define the training set, test set, true weights and bias
n_train,n_test,true_w,true_b=100,100,[1.2,-3.4,5.6],5
features=tf.random.normal(shape=(n_train+n_test,1))
poly_features=tf.concat([features,tf.pow(features,2),tf.pow(features,3)],axis=1)
print(poly_features.shape)
labels=(true_w[0]*poly_features[:,0]+true_w[1]*poly_features[:,1]+true_w[2]*poly_features[:,2]+true_b)
print(labels.shape)
labels+=tf.random.normal(shape=labels.shape,mean=0.0,stddev=0.1)
# Configure the figure style and size
def use_svg_display():
display.set_matplotlib_formats('svg')
def set_figsize(figsize=(5,3)):
use_svg_display()
plt.rcParams['figure.figsize']=figsize
# Custom plotting function
def semilogy(x_vals,y_vals,x_label,y_label,x2_vals=None,y2_vals=None,legend=None,figsize=(5,3)):
set_figsize(figsize)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.semilogy(x_vals,y_vals)
if x2_vals and y2_vals:
plt.semilogy(x2_vals,y2_vals,linestyle=':')
plt.legend(legend)
plt.show()
# Define the training function and examine the fitting result
num_epochs,loss=100,tf.losses.MeanSquaredError()
def fit_and_plot(train_features,test_features,train_labels,test_labels):
net=ks.Sequential()
net.add(ks.layers.Dense(1))
batch_size=min(10,train_labels.shape[0])
train_iter = tf.data.Dataset.from_tensor_slices((train_features,train_labels)).batch(batch_size)
test_iter = tf.data.Dataset.from_tensor_slices((test_features,test_labels)).batch(batch_size)
optimizer =ks.optimizers.SGD(0.01)
train_ls, test_ls = [], []
for _ in range(num_epochs):
for X, y in train_iter:
with tf.GradientTape() as tape:
l = loss(y, net(X))
grads = tape.gradient(l, net.trainable_variables)
optimizer.apply_gradients(zip(grads, net.trainable_variables))
train_ls.append(loss(train_labels, net(train_features)).numpy().mean())
test_ls.append(loss(test_labels, net(test_features)).numpy().mean())
print(train_ls)
print(test_ls)
print('final epoch: train loss', train_ls[-1], 'test loss', test_ls[-1])
semilogy(range(1,num_epochs+1),train_ls,'epochs','loss',range(1,num_epochs+1),test_ls,['train','test'])
print('weight:',net.get_weights()[0],
'\nbias:',net.get_weights()[1])
# fit_and_plot(poly_features[:n_train, :], poly_features[n_train:, :],
# labels[:n_train], labels[n_train:])
fit_and_plot(features[:n_train, :], features[n_train:, :], labels[:n_train],
labels[n_train:])
|
import pymongo
import json
from datetime import datetime
import time
# Establish the connection
myclient = pymongo.MongoClient("mongodb://202.204.62.145:27017/", username='admin', password='afish1001')
mydb = myclient["runoobdb"]
mycol = mydb["json_data"]
def insert_json_data():
file = open('./2019-02-21-10-19.txt', 'r', encoding="utf-8")
for each in file:
print(each)
eachline = json.loads(each)
time = datetime.strptime(eachline["time"], "%Y-%m-%d %H:%M:%S")  # convert the time field: str -> datetime
data = {"monitor_id": eachline["monitor_id"], "display": eachline["display"], "time": time}
result = mycol.insert_one(data)
print(result.acknowledged)  # check whether the insert succeeded: True on success, False on failure
file.close()
if __name__ == '__main__':
# mycol.drop()
# insert_json_data()
# count the total number of documents
# print(mycol.find().count())
myresult = mycol.find()
objectid = myresult[0]["_id"]  # myresult[0]: the first record
print("objectid: %s" % objectid) # _id
timestamp = time.mktime(objectid.generation_time.timetuple())
print("timestamp: %d" % timestamp)
timeArray = time.localtime(timestamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
print("time_transfer: %sUTC" % otherStyleTime)
|
# -*- coding: utf-8 -*-
"""
Django settings for django_backend project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
MEDIA_ROOT = os.path.join(BASE_DIR, "media/")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.getenv("DJANGO_DEBUG") == "1"
ALLOWED_HOSTS = os.getenv("DJANGO_ALLOWED_HOSTS").split(",")
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"corsheaders",
"rest_framework",
"sudan_art",
"storages",
]
MIDDLEWARE = [
    "corsheaders.middleware.CorsMiddleware",  # must come before middleware that can generate responses, e.g. CommonMiddleware
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "django_backend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "django_backend.wsgi.application"
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": os.getenv("DATABASE_ENGINE"),
"NAME": os.getenv("DATABASE_NAME"),
"USER": os.getenv("DATABASE_USERNAME"),
"PASSWORD": os.getenv("DATABASE_PASSWORD"),
"HOST": os.getenv("DATABASE_HOST"),
"PORT": os.getenv("DATABASE_PORT"),
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# Moving static assets to DigitalOcean Spaces as per:
# https://www.digitalocean.com/community/tutorials/how-to-set-up-object-storage-with-django
AWS_ACCESS_KEY_ID = os.getenv("STATIC_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("STATIC_SECRET_KEY")
AWS_STORAGE_BUCKET_NAME = os.getenv("STATIC_BUCKET_NAME")
AWS_S3_ENDPOINT_URL = os.getenv("STATIC_ENDPOINT_URL")
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": "max-age=86400",
}
AWS_LOCATION = "media"
AWS_DEFAULT_ACL = "public-read"
STATICFILES_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
STATIC_URL = f"{AWS_S3_ENDPOINT_URL}/{AWS_LOCATION}/"
STATIC_ROOT = "static/"
AWS_QUERYSTRING_AUTH = False
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
CORS_ALLOWED_ORIGINS = [os.getenv("DJANGO_CORS_ORIGINS")]
PUBLIC_MEDIA_LOCATION = "media"
MEDIA_URL = f"https://{AWS_S3_ENDPOINT_URL}/{PUBLIC_MEDIA_LOCATION}/"
REST_FRAMEWORK = {
"DEFAULT_THROTTLE_CLASSES": [
"rest_framework.throttling.AnonRateThrottle",
],
"DEFAULT_THROTTLE_RATES": {
"anon": "999/minute",
},
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
}
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
}
}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "[%(asctime)s] %(levelname)s|%(name)s|%(message)s",
"datefmt": "%Y-%m-%d %H:%M:%S",
},
},
"handlers": {
"file": {
"level": "DEBUG",
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(BASE_DIR, "logs", "backend.log"),
"backupCount": 10,
"formatter": "simple",
},
},
"loggers": {
"": {
"handlers": ["file"],
"level": "DEBUG",
"propagate": True,
},
"django": {
"handlers": ["file"],
"level": "DEBUG",
"propagate": True,
},
},
}
DATA_UPLOAD_MAX_MEMORY_SIZE = 11000000
FILE_UPLOAD_MAX_MEMORY_SIZE = 10000000
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
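# Illustrative environment for local development only: every variable below is
# read via os.getenv() above, and the values shown are placeholders, not
# project defaults.
#
#   DJANGO_SECRET_KEY=replace-with-a-long-random-string
#   DJANGO_DEBUG=1
#   DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1
#   DJANGO_CORS_ORIGINS=http://localhost:3000
#   DATABASE_ENGINE=django.db.backends.postgresql
#   DATABASE_NAME=app_db
#   DATABASE_USERNAME=app_user
#   DATABASE_PASSWORD=app_password
#   DATABASE_HOST=localhost
#   DATABASE_PORT=5432
#   STATIC_ACCESS_KEY_ID=spaces-access-key
#   STATIC_SECRET_KEY=spaces-secret-key
#   STATIC_BUCKET_NAME=spaces-bucket
#   STATIC_ENDPOINT_URL=https://nyc3.digitaloceanspaces.com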
|
# -*- coding: utf-8 -*-
import codecs
import os
import pickle
#import io
#from PIL import Image
import numpy as np
def im_to_cifar(image_array, class_im):
# Input: 3D array (32 x 32 x 3)
im_array_R = image_array[:, :, 0]
im_array_G = image_array[:, :, 1]
im_array_B = image_array[:, :, 2]
byte_array = np.array(list(class_im) + list(im_array_R) + list(im_array_G) + list(im_array_B))
print(byte_array)  # Wanted to check, but it doesn't print anything...
return byte_array
def randomBackground():
i = np.random.randint(2, size=1)
weights = np.random.uniform(0, 1, size=(3,))
if i == 0:
image = np.random.uniform(0, 255, (32*32*3,))
image[0:32*32] = weights[0] * image[0:32*32]
image[32 * 32:2 * 32 * 32] = weights[1] * image[32 * 32:2*32*32]
image[2 * 32 * 32:3 * 32 * 32] = weights[2] * image[2 * 32 * 32:3 * 32 * 32]
image = np.uint8(image)
elif i == 1:
image = np.random.beta(np.random.uniform(0, 1, 1), np.random.uniform(0, 1, 1), (32*32*3,))*255
image[0:32*32] = weights[0] * image[0:32*32]
image[32 * 32:2 * 32 * 32] = weights[1] * image[32 * 32:2*32*32]
image[2 * 32 * 32:3 * 32 * 32] = weights[2] * image[2 * 32 * 32:3 * 32 * 32]
image = np.uint8(image)
return image
def unpickle(file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def dopickle(dict, file):
with open(file, 'wb') as fo:
pickle.dump(dict, fo)
def read_cifar(what):
meta_path = '/home/tobias/PycharmProjects/PlanB/venv/cifar-100-python/meta'
test_path = '/home/tobias/PycharmProjects/PlanB/venv/cifar-100-python/test'
train_path = '/home/tobias/PycharmProjects/PlanB/venv/cifar-100-python/train'
if what == 'meta':
data_path = meta_path
"""
'fine_label_names'
'coarse_label_names'
"""
elif what == 'test':
data_path = test_path
elif what == 'train':
data_path = train_path
""" b'filenames'
b'fine_labels'
b'data'
b'coarse_labels'
b'batch_label'
''
"""
dict = unpickle(data_path)
return dict
def generate_new_cifar():
# meta_dict = read_cifar('meta')
# meta_dict[b'fine_label_names'].append("b'background'")
# meta_dict[b'coarse_label_names'].append("b'background'")
folder = '/home/tobias/PycharmProjects/PlanB/venv/cifar-100-python/'
# dopickle(meta_dict, folder + 'meta_new')
# for i in range(2500):
#
# train_dict = read_cifar('train')
# filename = str(i)
# filename = filename.zfill(6)
# filename = "b'background_s_" + filename + ".png"
# train_dict[b'filenames'].append(filename)
# train_dict[b'fine_labels'].append(20)
# train_dict[b'coarse_labels'].append(100)
# train_dict[b'data'] = np.append(train_dict[b'data'], randomBackground())
# dopickle(train_dict, folder + 'train_new')
for i in range(500):
test_dict = read_cifar('test')
filename = str(i+2500)
filename = filename.zfill(6)
filename = "b'background_s_" + filename + ".png"
test_dict[b'filenames'].append(filename)
test_dict[b'fine_labels'].append(20)
test_dict[b'coarse_labels'].append(100)
test_dict[b'data'] = np.append(test_dict[b'data'], randomBackground())
dopickle(test_dict, folder + 'test_new')
|
import random
import pandas as pd
from datetime import datetime
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Generate Data
x = pd.date_range(datetime.today(), periods=100).tolist()
y = random.sample(range(1, 300), 100)
# Plot figure
fig = go.Figure([go.Bar(x=x, y=y)])
# Aesthetics of the plot
fig.update_layout(
{"plot_bgcolor": "rgba(0, 0, 0, 0)", "paper_bgcolor": "rgba(0, 0, 0, 0)"},
autosize=True,
margin=dict(l=50, r=50, b=50, t=50, pad=4, autoexpand=True),
# height=1000,
# hovermode="x",
)
# Add title and dynamic range selector to x axis
fig.update_xaxes(
title_text="Date",
rangeselector=dict(
buttons=list(
[
dict(count=6, label="6m", step="month", stepmode="backward"),
dict(count=1, label="1y", step="year", stepmode="backward"),
dict(step="all"),
]
)
),
)
# Add title to y axis
fig.update_yaxes(title_text="Count")
# Write out to file (.html)
config = {"displayModeBar": False, "displaylogo": False}
plotly_obj = plotly.offline.plot(
fig, include_plotlyjs=False, output_type="div", config=config
)
with open("_includes/plotly_obj.html", "w") as file:
file.write(plotly_obj)
# Grab timestamp
data_updated = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
# Write out to file (.html)
html_str = (
'<p><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16"><path fill-rule="evenodd" d="M1.5 8a6.5 6.5 0 1113 0 6.5 6.5 0 01-13 0zM8 0a8 8 0 100 16A8 8 0 008 0zm.5 4.75a.75.75 0 00-1.5 0v3.5a.75.75 0 00.471.696l2.5 1a.75.75 0 00.557-1.392L8.5 7.742V4.75z"></path></svg> Latest Data: '
+ data_updated
+ "</p>"
)
with open("_includes/update.html", "w") as file:
file.write(html_str)
|
# -*- coding: utf-8 -*-
# @Time    : 2020/1/14 10:30 AM
# @Author : upcbdipt
# @Project : CDW_FedAvg
# @FileName: reduce_dimension
import os
import numpy as np
import random
import tensorflow as tf
from main.model.autoencoder import AutoEncoderModel
import main.metrics.writer as metrics_writer
from main.utils.model_utils import read_data
from main.ae_client import Client
class ReduceDimension:
def __init__(self, config):
self.config = config
def run(self):
random_seed = self.config.random_seed
random.seed(1 + random_seed)
np.random.seed(12 + random_seed)
tf.set_random_seed(123 + random_seed)
tf.logging.set_verbosity(tf.logging.WARN)
# load model
lr = self.config.lr
n_hidden = self.config.n_hidden
tf.reset_default_graph()
# client
train_data_dir = os.path.join('data', 'train')
test_data_dir = os.path.join('data', 'test')
users, groups, train_data, test_data = read_data(train_data_dir, test_data_dir)
if len(groups) == 0:
groups = [[] for _ in users]
clients = []
i = 0
for u, g in zip(users, groups):
client_model = AutoEncoderModel(config=self.config, seed=random_seed, lr=lr, n_hidden=n_hidden)
clients.append(Client(u, g, train_data[u], test_data[u], client_model))
i += 1
client_ids, client_groups, client_num_samples = get_clients_info(clients)
print('Clients in Total: %d' % len(clients))
# Initialization
print('--- Random Initialization ---')
# save state
stat_writer_fn = get_stat_writer_function(client_ids, client_groups, client_num_samples, self.config)
print_stats(0, clients, client_num_samples, stat_writer_fn)
num_rounds = self.config.num_rounds
eval_every = self.config.eval_every
clients_per_round = self.config.clients_per_round
num_epochs = self.config.num_epochs
batch_size = self.config.batch_size
# training
for i in range(num_rounds):
print('--- Round %d of %d: Training %d Clients ---' % (i + 1, num_rounds, clients_per_round))
# Choose client
client_ids, client_groups, client_num_samples = get_clients_info(clients)
# Simulate server model training on selected clients' data
train_model(config=self.config, num_epochs=num_epochs, batch_size=batch_size,
minibatch=None, clients=clients)
# Test model
if (i + 1) % eval_every == 0 or (i + 1) == num_rounds:
print_stats(i + 1, clients, client_num_samples, stat_writer_fn)
encoders, _ = test_model(clients, 'test')
for client in clients:
reduce_path = os.path.join('data', 'reduce')
if not os.path.exists(reduce_path):
os.makedirs(reduce_path)
np.save(os.path.join(reduce_path, str(client.id + '.npy')), encoders[client.id])
client.model.close()
def train_model(config, num_epochs=1, batch_size=10, minibatch=None, clients=None):
sys_metrics = {
c.id: {config.bytes_written_key: 0,
config.bytes_read_key: 0,
config.local_computations_key: 0} for c in clients}
for c in clients:
# train
comp, num_samples, update = c.train(num_epochs, batch_size, minibatch)
sys_metrics[c.id][config.bytes_read_key] += c.model.size
sys_metrics[c.id][config.bytes_written_key] += c.model.size
sys_metrics[c.id][config.local_computations_key] = comp
return sys_metrics
def get_clients_info(clients):
"""Returns the ids, hierarchies and num_samples for the given clients.
Returns info about self.selected_clients if clients=None;
Args:
clients: list of Client objects.
"""
ids = [c.id for c in clients]
groups = {c.id: c.group for c in clients}
num_samples = {c.id: c.num_samples for c in clients}
return ids, groups, num_samples
def get_stat_writer_function(ids, groups, num_samples, config):
def writer_fn(num_round, metrics, partition):
metrics_writer.print_metrics(
num_round, ids, metrics, groups, num_samples, partition, config.reduce_dimension_metrics_dir,
'{}_{}'.format(config.reduce_dimension_metrics_name, 'stat'))
return writer_fn
def print_stats(
num_round, clients, num_samples, writer, use_val_set=False):
_, train_stat_metrics = test_model(clients, set_to_use='train')  # per-client training loss
print_metrics(train_stat_metrics, num_samples, prefix='train_')
writer(num_round, train_stat_metrics, 'train')
eval_set = 'test' if not use_val_set else 'val'
_, test_stat_metrics = test_model(clients, set_to_use=eval_set)
print_metrics(test_stat_metrics, num_samples, prefix='{}_'.format(eval_set))
writer(num_round, test_stat_metrics, eval_set)
def test_model(clients_to_test, set_to_use='test'):
"""Tests self.model on given clients.
Tests model on self.selected_clients if clients_to_test=None.
Args:
clients_to_test: list of Client objects.
set_to_use: dataset to test on. Should be in ['train', 'test'].
"""
metrics = {}
encoders = {}
for client in clients_to_test:
encoder, c_metrics = client.test(set_to_use)
metrics[client.id] = c_metrics
encoders[client.id] = encoder
return encoders, metrics
def print_metrics(metrics, weights, prefix=''):
"""Prints weighted averages of the given metrics.
Args:
metrics: dict with client ids as keys. Each entry is a dict
with the metrics of that client.
weights: dict with client ids as keys. Each entry is the weight
for that client.
"""
ordered_weights = [weights[c] for c in sorted(weights)]
metric_names = metrics_writer.get_metrics_names(metrics)
to_ret = None
# for metric in metric_names:
# ordered_metric = [metrics[c][metric] for c in sorted(metrics)]
# print('%s: %g, 10th percentile: %g, 50th percentile: %g, 90th percentile %g' \
# % (prefix + metric,
# np.average(ordered_metric, weights=ordered_weights),
# np.percentile(ordered_metric, 10),
# np.percentile(ordered_metric, 50),
# np.percentile(ordered_metric, 90)))
|
"""
https://www.practicepython.org
Exercise 23: File Overlap
2 chilis
Given two .txt files that have lists of numbers in them, find the numbers
that are overlapping. One .txt file has a list of all prime numbers under
1000, and the other .txt file has a list of happy numbers up to 1000.
(If you forgot, prime numbers are numbers that can’t be divided by any
other number. And yes, happy numbers are a real thing in mathematics -
you can look it up on Wikipedia. The explanation is easier with an example,
which I will describe below.)
"""
def file_overlap():
# read in the primes and save as a list of ints
primes = []
with open('primes.txt', 'r') as primes_file:
next_prime = primes_file.readline()
while next_prime:
primes.append(int(next_prime))
next_prime = primes_file.readline()
# read in the happy numbers and compare each value to the prime list
with open('happy.txt', 'r') as happy_file:
next_happy = happy_file.readline()
while next_happy:
if int(next_happy.strip("\n")) in primes:
print(next_happy.strip("\n") + " is in both lists")
next_happy = happy_file.readline()
if __name__ == '__main__':
file_overlap()
"""
Michele's second solution:
def filetolistofints(filename):
list_to_return = []
with open(filename) as f:
line = f.readline()
while line:
list_to_return.append(int(line))
line = f.readline()
return list_to_return
primeslist = filetolistofints('primenumbers.txt')
happieslist = filetolistofints('happynumbers.txt')
overlaplist = [elem for elem in primeslist if elem in happieslist]
print(overlaplist)
"""
|
class SellsyAuthenticateError(Exception):
pass
class SellsyError(Exception):
def __init__(self, sellsy_code_error, message):
super(SellsyError, self).__init__(message)
self.sellsy_code_error = sellsy_code_error
self.message = message
def __str__(self):
return '{} - {}'.format(self.sellsy_code_error, self.message)
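if __name__ == '__main__':
    # Minimal usage sketch; the error code below is a hypothetical example, not a
    # documented Sellsy code.
    try:
        raise SellsyError('E_OBJ_NOT_LOADABLE', 'Client not found')
    except SellsyError as error:
        print(error)  # -> E_OBJ_NOT_LOADABLE - Client not found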
|
from datetime import datetime
import os
import logging
import numpy as np
import pandas as pd
import pystore
from decimal import Decimal
from tests.fixtures import Fixes
from src.crypto_accountant.bookkeeper import BookKeeper
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
pd.set_option("display.expand_frame_repr", False)
pd.set_option("display.width", 0)
pd.set_option('display.float_format', lambda x: '%.8f' % x)
pd.set_option("display.max_colwidth", 20)
pd.set_option("display.max_rows", None)
pd.set_option("display.min_rows", 100)
pd.set_option("display.max_columns", 8)
# Hardcoded transactions
# txs = TxnFactory.hardcoded_txs()
# Firebase Transactions
firestore_cred_file = Fixes.firestore_cred_file(Fixes.storage_dir())
firestore_ref = Fixes.firestore_ref(firestore_cred_file)
txs = Fixes.firestore_user_transactions(firestore_ref)
# Pystore historical data
pystore.set_path("/Volumes/CAPA/.storage")
store = pystore.store("messari")
historical_prices = store.collection('price')
def get_historical_df(symbols):
df = pd.DataFrame()
available_assets = historical_prices.list_items()
not_found_assets = []
for symbol in symbols:
if symbol in available_assets:
prices = historical_prices.item(symbol).to_pandas()
prices[symbol] = prices['close'].apply(lambda x: Decimal(x))
prices = prices.drop(['open', 'close', 'high', 'low', 'volume'], axis=1)
if df.empty:
df = prices.copy()
else:
df = df.join(prices, how='outer')
else:
not_found_assets.append(symbol)
print(not_found_assets)
df[not_found_assets] = np.nan # create nan columns for unfound assets
df['USD'] = Decimal(1)  # USD is the quote currency, so its price is always 1
df = df.fillna(Decimal(0))
df.index = df.index.tz_localize(tz='UTC').floor('1D')
return df
start = datetime.now()
# initialize bookkeeper
bk = BookKeeper()
bk.add_txs(txs, auto_detect=True)
eq_curve = bk.ledger.generate_equity_curve('assets')
print(eq_curve)
# multiply qty df with price df and then sum them all into total
# historical = get_historical_df(bk.ledger.symbols)
# val_curve = eq_curve.mul(historical)
# val_curve['total'] = val_curve[bk.ledger.symbols].sum(axis=1)
# print(val_curve['total'])
print(datetime.now() - start)
|
import datetime
from django.db import models
from django.db.models import signals
from django.conf import settings
from django.utils import simplejson as json
from django.dispatch import dispatcher
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
elif isinstance(obj, datetime.time):
return obj.strftime('%H:%M:%S')
return json.JSONEncoder.default(self, obj)
def dumps(data):
return JSONEncoder().encode(data)
def loads(str):
return json.loads(str, encoding=settings.DEFAULT_CHARSET)
class JSONField(models.TextField):
"""
Cribbed directly from http://www.djangosnippets.org/snippets/377/
This is a great way to pack extra data into a model object, where
the structure is dynamic, and not relational. For instance, if you
wanted to store a list of dictionaries. The data won't be
classically searchable, but you can define pretty much any data
construct you'd like, as long as it is JSON-serializable. It's
especially useful in a JSON-heavy application or one that deals
with a lot of JavaScript.
Example (models.py):
from django.db import models
from jsonfield import JSONField
class Sequence(models.Model):
name = models.CharField(maxlength=25)
list = JSONField()
Example (shell):
fib = Sequence(name='Fibonacci')
fib.list = [0, 1, 1, 2, 3, 5, 8]
fib.save()
fib = Sequence.objects.get(name='Fibonacci')
fib.list.append(13)
print fib.list
[0, 1, 1, 2, 3, 5, 8, 13]
fib.get_list_json()
"[0, 1, 1, 2, 3, 5, 8, 13]"
NOTE: You can only save JSON-serializable data. Also, dates will
be converted to string-timestamps, because I don't really know
what better to do with them. Finally, I'm not sure how to interact
with forms yet, so that realm is a bit murky.
"""
def db_type(self):
return 'text'
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname, None)
return dumps(value)
def contribute_to_class(self, cls, name):
super(JSONField, self).contribute_to_class(cls, name)
dispatcher.connect(self.post_init, signal=signals.post_init, sender=cls)
def get_json(model_instance):
return dumps(getattr(model_instance, self.attname, None))
setattr(cls, 'get_%s_json' % self.name, get_json)
def set_json(model_instance, json):
return setattr(model_instance, self.attname, loads(json))
setattr(cls, 'set_%s_json' % self.name, set_json)
def post_init(self, instance=None):
value = self.value_from_object(instance)
if (value):
setattr(instance, self.attname, loads(value))
else:
setattr(instance, self.attname, None)
|
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2021
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
from PIL import Image
from rank_bm25 import BM25Okapi
import wikipedia as wiki
import pandas as pd
import streamlit as st
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
from qa.utils import absolute_path
MODEL_OPTIONS = {
"BERT": "deepset/bert-base-cased-squad2",
"RoBERTa": "mbeck/roberta-base-squad2",
"DistilBERT": "twmkn9/distilbert-base-uncased-squad2",
"MiniLM": "deepset/minilm-uncased-squad2",
"XLM-RoBERTa": "deepset/xlm-roberta-large-squad2",
}
CONTEXT_OPTIONS = {
"Wikipedia summary paragraph": "summary",
"Full Wikipedia article": "full",
"Use RelSnip to identify most relevant sections": "relsnip",
}
@st.cache(allow_output_mutation=True)
def load_model(model_choice):
model_name = MODEL_OPTIONS[model_choice]
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = pipeline("question-answering", model=model_name, tokenizer=tokenizer)
return model
def highlight_text(segment, context, full_text=False):
if segment not in context:
return
length = len(segment)
if full_text:
# find the section the answer was found in and display only that section
chunks = context.split("==")
for chunk in chunks:
if segment in chunk:
idx = chunk.index(segment)
chunk1 = chunk[:idx]
chunk2 = chunk[idx : idx + length]
chunk3 = chunk[idx + length :]
break
else:
idx = context.index(segment)
chunk1 = context[:idx]
chunk2 = context[idx : idx + length]
chunk3 = context[idx + length :]
new_context = (
chunk1
+ '<span style="background-color: #FFFF00"> **'
+ chunk2
+ "** </span>"
+ chunk3
)
return new_context
def relsnip(context, num_fragments=5):
# Wiki section headings are wrapped with "==", (e.g., == Color ==)
# split the context by article sections
chunks = context.split("\n== ")
# Remove sections that won't contain an answer
chunks_cleaned = list()
for chunk in chunks:
subchunks = chunk.split(" ==")
if subchunks[0] in [
"See also",
"References",
"Further reading",
"External links",
]:
continue
chunks_cleaned.append(chunk)
# tokenize each chunk and pass to BM25 search algorithm
tokenized_chunks = [chunk.split(" ") for chunk in chunks_cleaned]
bm25 = BM25Okapi(tokenized_chunks)
# tokenize the query and score each chunk
tokenized_query = query.split(" ")
chunk_scores = bm25.get_scores(tokenized_query)
# sort the chunks by their BM25 score, highest first
sorted_chunks = [c for s, c in sorted(zip(chunk_scores, chunks_cleaned), key=lambda sc: sc[0], reverse=True)]
# select the num_fragments highest scoring chunks
short_context = ""
for chunk in sorted_chunks[:num_fragments]:
short_context = short_context + chunk
return short_context
def make_url(segment, url):
new_segment = f'<a target="_blank" href="{url}">{segment}</a>'
return new_segment
# ------ SIDEBAR SELECTIONS ------
image = Image.open(absolute_path("images", "cloudera-fast-forward.png"))
st.sidebar.image(image, use_column_width=True)
st.sidebar.markdown(
"This app demonstrates a simple question answering system on Wikipedia. \
The question is first used in Wikipedia's default search engine, \
resulting in a ranked list of relevant Wikipedia pages. \
The question and each Wikipedia page are then sent to the QA model, which returns answers \
extracted from the text."
)
model_choice = st.sidebar.selectbox(
"Choose a Transformer model:", list(MODEL_OPTIONS.keys())
)
number_of_pages = st.sidebar.slider(
"How many Wikipedia pages should be displayed?", 1, 5, 1
)
number_of_answers = st.sidebar.slider(
"How many answers should the model suggest for each Wikipedia page?", 1, 5, 1
)
st.sidebar.text("")
st.sidebar.markdown(
"By default, the QA Model will only process the Wikipedia **summary** for answers. \
This saves time since Wikipedia pages are long and QA models are *slow*. \
Here, you can opt to use the **full text** of the article, or you can \
choose **RelSnip**, which uses BM25 to identify the most relevant sections \
of Wikipedia pages."
)
context_choice = st.sidebar.selectbox(
"Choose which part of the Wikipedia page(s) to process:",
list(CONTEXT_OPTIONS.keys()),
)
context_selection = CONTEXT_OPTIONS[context_choice]
if context_selection == "relsnip":
num_sections = st.sidebar.slider(
"How many sections should RelSnip identify?", 3, 7, 5
)
st.sidebar.markdown(
"**NOTE: Including more text often results in a better answer, but longer inference times.**"
)
# ------ BEGIN APP ------
st.title("Question Answering with ")
image = absolute_path("images/669px-Wikipedia-logo-v2-en.svg.png")
st.image(Image.open(image), width=400)
# ------ LOAD QA MODEL ------
reader = load_model(model_choice)
# ------ GET QUESTION ------
st.markdown("## Ask a question")
query = st.text_input("Enter text here", "Why is the sky blue?")
st.markdown(f"## Displaying the top {number_of_pages} results:")
# ------ SEARCH ENGINE (RETRIEVER) ------
results = wiki.search(query, results=number_of_pages)
# ------ ANSWER EXTRACTION (READER) ------
for i, result in enumerate(results):
wiki_page = wiki.page(result, auto_suggest=False)
# display the Wiki title as a URL
title_url = make_url(result, wiki_page.url)
st.markdown("### " + str(i + 1) + ") " + title_url, unsafe_allow_html=True)
use_full_text = True
# grab text for answer extraction
if context_selection == "full":
context = wiki_page.content
elif context_selection == "relsnip":
context = wiki_page.content
context = relsnip(context, num_sections)
else:
context = wiki_page.summary
use_full_text = False
# extract answers
inputs = {"question": query, "context": context}
answers = reader(inputs, **{"topk": number_of_answers})
try:
answerdf = pd.DataFrame(answers)
except:
answerdf = pd.DataFrame(answers, index=[0])
# display results
hilite_context = highlight_text(
answerdf["answer"][0], context, full_text=use_full_text
)
st.markdown(hilite_context, unsafe_allow_html=True)
answerdf.drop(columns=["start", "end"], inplace=True)
st.table(answerdf)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
import glob
import logging
import re
import pandas
import numpy as np
from collections import defaultdict
from collections import OrderedDict
data_path = './'
n = 0
class data_passport:
def __init__(self):
self.n_conf_files = 0
self.n_cf_formats = {}
self.n_data_files = 0
self.n_empty_conf_files = 0
self.n_ecf_formats = defaultdict(int)
self.n_uiks = 0
self.n_subjects = 0
self.n_valid_bulletins = 0
self.n_not_valid_bulletins = 0
self.n_given_bulletins = 0
self.n_registered_voters = 0
self.n_candidates = 0
self.n_data_errors = 0
self.data_errors = {}
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
dp = data_passport()
logging.debug('args:')
for a in sys.argv:
logging.debug(a)
args = sys.argv[1:]
if '--data' in args:
data_path = args[1]
logging.info('Data directory: %s', data_path)
formats = defaultdict(int)
dig_formats = defaultdict(int)
for e_cfg_file in glob.glob(os.path.join(data_path, '*.json')):
with open(e_cfg_file) as json_data:
ecfg = json.load(json_data)
dp.n_conf_files += 1
pattern = re.compile(r'(\dNL).(json)')
res = re.search(pattern, e_cfg_file)
if res is None:
pattern = re.compile(r'(_\d\d).(json)')
res = re.search(pattern, e_cfg_file)
if res is None:
pattern = re.compile(r'(_\d).(json)')
res = re.search(pattern, e_cfg_file)
if res is None:
print(e_cfg_file + ' did not match any pattern, exiting')
exit(1)
res = res.group(1)
dd = re.search(r'\d+', res).group()
if 'NL' in res:
dp.n_empty_conf_files += 1
dp.n_ecf_formats[dd] += 1
else:
if dd not in dp.n_cf_formats:
dp.n_cf_formats[dd] = {
'nfiles' : 0,
'nuiks': 0,
'nsubjects': 0,
'nvalid_bulletins': 0,
'nnotvalid_bulletins': 0,
'ngiven_bulletins': 0,
'nregistered_voters': 0,
'ncandidates': 0,
'ndata_errors': 0
}
df = pandas.read_csv(data_path + '/' + ecfg['source_file'], encoding="utf-8", delimiter=',')
dp.n_data_files += 1
dp.n_cf_formats[dd]['nfiles'] += 1
vbc = ecfg['valid_bulletins_column']
valid_bulletins = np.array(df[vbc], dtype=float)
dp.n_valid_bulletins += int(valid_bulletins.sum())
gbc = 'calc0'
given_bulletins = np.array(df[gbc], dtype=float)
dp.n_given_bulletins += int(given_bulletins.sum())
dp.n_cf_formats[dd]['ngiven_bulletins'] += int(given_bulletins.sum())
nvbc = ecfg['not_valid_bulletins_column']
not_valid_bulletins = np.array(df[nvbc], dtype=float)
dp.n_not_valid_bulletins += int(not_valid_bulletins.sum())
dp.n_cf_formats[dd]['nvalid_bulletins'] += int(valid_bulletins.sum())
dp.n_cf_formats[dd]['nnotvalid_bulletins'] += int(not_valid_bulletins.sum())
dp.n_uiks += df[vbc].count()
dp.n_cf_formats[dd]['nuiks'] += df[vbc].count()
dp.n_candidates += len(ecfg['candidates_columns'])
dp.n_cf_formats[dd]['ncandidates'] += len(ecfg['candidates_columns'])
dp.n_data_errors += len(ecfg['data_errors'])
dp.n_cf_formats[dd]['ndata_errors'] += len(ecfg['data_errors'])
for e in ecfg['data_errors']:
if e['kind'] in dp.data_errors:
dp.data_errors[e['kind']] += 1
else:
dp.data_errors[e['kind']] = 1
# if e['kind'] == 10:
# print 'In ', e_cfg_file, e['comment'].encode('utf-8')
registered_voters = np.array(df[ecfg['registered_voters_column']], dtype=float)
registered_voters[np.isnan(registered_voters)] = 0
dp.n_registered_voters += int(registered_voters.sum())
dp.n_cf_formats[dd]['nregistered_voters'] += int(registered_voters.sum()) #####
for k in dp.__dict__:
if type(dp.__dict__[k]) in [int, np.int64]:
logging.info('%s: %d', k, dp.__dict__[k])
else:
logging.info(k)
od = OrderedDict(sorted(dp.__dict__[k].items()))
for v in od:
if type(od[v]) != dict:
logging.info('\t%s: %d', v, od[v])
else:
logging.info('\t%s', v)
for vv in od[v]:
logging.info('\t\t%s: %d', vv, od[v][vv])
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
import os
# In[ ]:
mydir = os.getcwd()
result = []
for file in os.listdir(mydir):
if file.endswith("-0.csv"):
result.append(os.path.join(file))
#print(os.path.join(file))
sorted_list_0 = sorted(result)
sorted_list_0
result = []
for file in os.listdir(mydir):
if file.endswith("-1.csv"):
result.append(os.path.join(file))
#print(os.path.join(file))
sorted_list_1 = sorted(result)
sorted_list_1
result = []
for file in os.listdir(mydir):
if file.endswith("-2.csv"):
result.append(os.path.join(file))
#print(os.path.join(file))
sorted_list_2 = sorted(result)
#sorted_list_2
# In[ ]:
length = len(sorted_list_0)
for i in range(length):
input_date_0 = sorted_list_0[i]
input_date_1 = sorted_list_1[i]
input_date_2 = sorted_list_2[i]
input_date = input_date_0[:-8]
## BELOW IS THE PROGRAM/CODE TO RETRIEVE ALL RATIOS FOR BS, CSI & CSCF
print('\n')
print('\n')
print(input_date_0)
file = pd.read_csv(input_date_0)
file = file.drop(file.columns[2:], axis=1)
file = file.set_axis([input_date_0, 'Default'], axis=1, inplace=False)
col_one_list_0 = file[input_date_0].tolist()
#col_one_arr = file[input_date_0].to_numpy()
#print(f"\ncol_one_list:\n{col_one_list}\ntype:{type(col_one_list)}")
#print(f"\ncol_one_arr:\n{col_one_arr}\ntype:{type(col_one_arr)}")
#Enter subs
#subs_00 = 'Total liabilities and s'
subs00 = 'Marketable securities'
subs01 = 'Cash and cash equivalents'
subs02 = 'Accounts receivable'
subs03 = 'Total current a'
subs04 = 'Total current l'
subs05 = 'Total assets'
subs06 = 'Total liabilities'
subs07 = 'Retaine'
subs08 = 'Total stockholder'
subs09 = 'Property'
subs10 = 'Short'
subs11 = 'Long'
subs12 = 'Convertible preferred stock, $0.001'
subs13 = 'and Class B'
subs14 = 'Prepaid'
#res_00 = [i for i in col_one_list if subs_00 in i]
#print(res_00)
#file.replace(res_00, "NaN", inplace=True)
res00 = [i for i in col_one_list_0 if subs00 in i]
#print(res00)
res01 = [i for i in col_one_list_0 if subs01 in i]
#print(res01)
res02 = [i for i in col_one_list_0 if subs02 in i]
#print(res02)
res03 = [i for i in col_one_list_0 if subs03 in i]
#print(res03)
res04 = [i for i in col_one_list_0 if subs04 in i]
#print(res04)
res05 = [i for i in col_one_list_0 if subs05 in i]
#print(res05)
res06 = [i for i in col_one_list_0 if subs06 in i]
#print(res06)
res07 = [i for i in col_one_list_0 if subs07 in i]
#print(res07)
res08 = [i for i in col_one_list_0 if subs08 in i]
#print(res08)
res09 = [i for i in col_one_list_0 if subs09 in i]
#print(res09)
res10 = [i for i in col_one_list_0 if subs10 in i]
#print(res10)
res11 = [i for i in col_one_list_0 if subs11 in i]
#print(res11)
res12 = [i for i in col_one_list_0 if subs12 in i]
#print(res12)
res13 = [i for i in col_one_list_0 if subs13 in i]
#print(res13)
res14 = [i for i in col_one_list_0 if subs14 in i]
#print(res14)
if len(res00) >= 2:
res00 = res00[0]
#print(res00)
if len(res06) >= 2:
res06 = res06[0]
#print(res06)
if len(res14) >= 2:
res14 = res14[0]
#print(res14)
file.replace(res00,
"Marketable securities", inplace=True)
file.replace(res01,
"Cash and cash equivalents", inplace=True)
file.replace(res02,
"Accounts receivable", inplace=True)
file.replace(res03,
"Current assets", inplace=True)
file.replace(res04,
"Current liabilities", inplace=True)
file.replace(res05,
"Total assets", inplace=True)
file.replace(res06,
"Total liabilities", inplace=True)
file.replace(res07,
"Retained earnings", inplace=True)
file.replace(res08,
"Total stockholder's equity", inplace=True)
file.replace(res09,
"Property, plant and equipment", inplace=True)
file.replace(res10,
"Short-term debt", inplace=True)
file.replace(res11,
"Long-term debt", inplace=True)
file.replace(res12,
"Convertible stocks", inplace=True)
file.replace(res13,
"Class A and Class B stocks", inplace=True)
file.replace(res14,
"Prepaid expenses", inplace=True)
#file
#file.loc[file[input_date_0] == 'Marketable securities']
## PREPAID EXPENSES DOES NOT EXIST
file = file[file['Default'].notna()]
col_one_arr = file.loc[file[input_date_0].isin(['Marketable securities',
'Cash and cash equivalents',
'Accounts receivable',
'Current assets',
'Current liabilities',
'Total assets',
'Total liabilities',
'Retained earnings',
"Total stockholder's equity",
'Property, plant and equipment',
'Short-term debt',
'Long-term debt',
'Convertible stocks',
'Class A and Class B stocks',
'Prepaid expenses'])].to_numpy()
if res14:
col_one_arr_01 = col_one_arr
#print("Prepaid. Expenses exists.")
else:
col_one_arr_00 = np.array([['Prepaid expenses', '0']])
col_one_arr_01 = np.append(col_one_arr,col_one_arr_00, axis=0)
#print("Prepaid. Expenses has been added.")
#print(col_one_arr_01)
if res10:
col_one_arr_02 = col_one_arr_01
#print("Short-term Debt exists.")
else:
col_one_arr_00 = np.array([['Short-term debt', '0']])
col_one_arr_02 = np.append(col_one_arr_01,col_one_arr_00, axis=0)
#print("Short-term Debt has been added.")
#print(col_one_arr_02)
if res12:
col_one_arr_03 = col_one_arr_02
#print("Convertible Stocks exists.")
else:
col_one_arr_00 = np.array([['Convertible stocks', '0']])
col_one_arr_03 = np.append(col_one_arr_02,col_one_arr_00, axis=0)
#print("Convertible Stocks has been added.")
#print(col_one_arr_03)
col_one_arr_CSI_04 = np.vstack({tuple(row) for row in col_one_arr_03})
col_one_arr_BS = np.array(sorted(col_one_arr_CSI_04 ,key=lambda x: x[0]))
#col_one_arr_BS = col_one_arr
## CONVERTIBLE STOCKS NEED TO BE DEFINED (MAX) OR *HIGHEST VALUE - IF MULTIPLE
#obj = len(col_one_arr)
#np.insert(col_one_arr, obj, ["PE_1", "PE_1_VALUE"], 0)
#new_arr00 = np.append(col_one_arr, [["PE_1", "PE_1_VALUE"]],0)
#new_arr01 = np.append(new_arr00, [["PE_1", "PE_1_VALUE"]],0)
#new_arr02 = np.append(new_arr01, [["PE_2", "PE_2_VALUE"]],0)
#new_arr03 = np.append(new_arr02, [["PEG_1", "PEG_1_VALUE"]],0)
#new_arr04 = np.append(new_arr03, [["PEG_2", "PEG_2_VALUE"]],0)
#new_arr05 = np.append(new_arr04, [["EV", "EB_VALUE"]],0)
#new_arr06 = np.append(new_arr05, [["EBIT", "EBIT_VALUE"]],0)
#new_arr07 = np.append(new_arr06, [["EBITDA", "EBITDA_VALUE"]],0)
#new_arr08 = np.append(new_arr07, [["DE", "DE_VALUE"]],0)
#new_arr09 = np.append(new_arr08, [["NE", "NE_VALUE"]],0)
#new_arr10 = np.append(new_arr09, [["QR_1", "QR_1_VALUE"]],0)
#new_arr11 = np.append(new_arr10, [["QR_2", "QR_2_VALUE"]],0)
#new_arr12 = np.append(new_arr11, [["DY", "DY_VALUE"]],0)
#new_arr13 = np.append(new_arr12, [["DPS", "DPS_VALUE"]],0)
#new_arr14 = np.append(new_arr13, [["EPS", "EPS_VALUE"]],0)
#new_arr15 = np.append(new_arr14, [["MCAP", "MCAP_VALUE"]],0)
#new_arr16 = np.append(new_arr15, [["ROE", "ROE_VALUE"]],0)
#new_arr17 = np.append(new_arr16, [["NetMargin", "NetMargin_VALUE"]],0)
#new_arr18 = np.append(new_arr17, [["OperatingMargin", "OperatingMargin_VALUE"]],0)
#new_arr18
file = pd.read_csv(input_date_1)
file = file.drop(file.columns[2:], axis=1)
file = file.set_axis([input_date_1, 'Default'], axis=1, inplace=False)
file = file.dropna()
col_one_list_1 = file[input_date_1].tolist()
#print(f"\ncol_one_list:\n{col_one_list}\ntype:{type(col_one_list)}")
col_one_list_1[0] = "Additional Info"
#Enter subs
Rev = 'Revenues'
#print(Rev)
Rev_0 = 'Revenue'
#print(Rev_0)
NI = 'Net income'
#print(NI)
RD = 'Research and development'
#print(RD)
subs00 = 'Cost of'
subs01 = 'Income from operations'
subs02 = 'Income before'
subs03 = 'asic'
subs04 = 'iluted'
subs05 = 'Less'
res00 = [i for i in col_one_list_1 if subs00 in i]
#print(res00)
res01 = [i for i in col_one_list_1 if subs01 in i]
#print(res01)
res02 = [i for i in col_one_list_1 if subs02 in i]
#print(res02)
res03 = [i for i in col_one_list_1 if subs03 in i]
#print(res03)
res04 = [i for i in col_one_list_1 if subs04 in i]
#print(res04)
res05 = [i for i in col_one_list_1 if subs05 in i]
#print(res05)
if len(NI) >= 2:
NI = NI[0]
#print(NI)
file.replace(RD,
"Research and development", inplace=True)
file.replace(Rev_0,
"Revenues", inplace=True)
file.replace(Rev,
"Revenues", inplace=True)
file.replace(NI,
"Net income", inplace=True)
file.replace(res00,
"Cost of revenues", inplace=True)
file.replace(res01,
"Operating income", inplace=True)
file.replace(res02,
"Income before taxes", inplace=True)
file.replace(res03,
"Basic EPS", inplace=True)
file.replace(res04,
"Diluted EPS", inplace=True)
file.replace(res05,
"Less: Adjustment Payments", inplace=True)
col_one_arr = file.loc[file[input_date_1].isin(['Revenues',
'Net income',
'Research and development',
'Cost of revenues',
'Research and development',
'Operating income',
'Income before taxes',
'Basic EPS',
'Diluted EPS',
'Less: Adjustment Payments'])].to_numpy()
#print(col_one_arr)
#print(res05)
#print(type(res05))
if res05:
col_one_arr_01 = col_one_arr
#print("Adj. Payments exists.")
else:
col_one_arr_00 = np.array([['Less: Adjustment Payments', '0']])
col_one_arr_01 = np.append(col_one_arr,col_one_arr_00, axis=0)
#print("Adj. Payments has been added.")
#print(col_one_arr_01)
col_one_arr_CSI_00 = np.vstack({tuple(row) for row in col_one_arr_01})
col_one_arr_CSI_01 = np.array(sorted(col_one_arr_CSI_00 ,key=lambda x: x[0]))
#print(col_one_arr_CSI_01)
###
counts = {}
for elem in col_one_arr_CSI_00:
counts[elem[0]] = counts.get(elem[0], 0) + 1
new_array = []
for elem in col_one_arr_CSI_00:
if counts[elem[0]] > 1:
new_array.append(elem)
if new_array:  # 'Net income' appears more than once: keep the row with the larger value
    ni_loc_0 = np.where(col_one_arr_CSI_01 == 'Net income')[0][0]
ni_loc_1 = np.where(col_one_arr_CSI_01 == 'Net income')[0][1]
Net_income_v_0 = col_one_arr_CSI_01[ni_loc_0][1]
Net_income_v_1 = col_one_arr_CSI_01[ni_loc_1][1]
if Net_income_v_0 > Net_income_v_1:
Net_income_v_2 = Net_income_v_0
else:
Net_income_v_2 = Net_income_v_1
#print(Net_income_v_2)
if col_one_arr_CSI_01[ni_loc_0][1] == Net_income_v_2:
col_one_arr_CSI_0 = np.delete(col_one_arr_CSI_01, ni_loc_1, 0)
#print('NI_LOC_0')
else:
col_one_arr_CSI_0 = np.delete(col_one_arr_CSI_01, ni_loc_0, 0)
#print('NI_LOC_1')
col_one_arr_CSI = col_one_arr_CSI_0
else:
ni_loc_0 = np.where(col_one_arr_CSI_01 == 'Net income')[0]
col_one_arr_CSI = col_one_arr_CSI_01
#print(col_one_arr_CSI)
#['Net income'] in col_one_arr_CSI[6]
#print(input_date_2)
file = pd.read_csv(input_date_2)
#print(file)
file = file.drop(file.columns[2:], axis=1)
file = file.set_axis([input_date_2, 'Default'], axis=1, inplace=False)
#print(file)
col_one_list_2 = file[input_date_2].tolist()
#print(col_one_list_0)
#print(f"\ncol_one_list:\n{col_one_list}\ntype:{type(col_one_list)}")
col_one_list_2[0] = "Additional Info"
#col_one_list = list(dict.fromkeys(col_one_list_0))
#print(col_one_list)
subs00 = 'Net income'
subs01 = 'Depreciation'
subs02 = 'Amortization'
subs03 = 'Prepaid'
subs04 = 'by operating activities'
subs05 = 'in investing activities'
subs06 = 'in financing activities'
subs07 = 'Purchases of property'
res00 = [i for i in col_one_list_2 if subs00 in i]
#print(res00)
res01 = [i for i in col_one_list_2 if subs01 in i]
#print(res01)
res02 = [i for i in col_one_list_2 if subs02 in i]
#print(res02)
res03 = [i for i in col_one_list_2 if subs03 in i]
#print(res03)
res04 = [i for i in col_one_list_2 if subs04 in i]
#print(res04)
res05 = [i for i in col_one_list_2 if subs05 in i]
#print(res05)
res06 = [i for i in col_one_list_2 if subs06 in i]
#print(res06)
res07 = [i for i in col_one_list_2 if subs07 in i]
#print(res07)
file.replace(res00,
"Net income", inplace=True)
file.replace(res01,
"Depreciation", inplace=True)
file.replace(res02,
"Amortization", inplace=True)
file.replace(res03,
"Prepaid expenses", inplace=True)
file.replace(res04,
"Net cash provided by operating activities", inplace=True)
file.replace(res05,
"Net cash provided by investing activities", inplace=True)
file.replace(res06,
"Net cash provided by financing activities", inplace=True)
file.replace(res07,
"Purchase of property, plant and equipment", inplace=True)
col_one_list_2 = file.loc[file[input_date_2].isin(['Net income',
'Depreciation',
'Amortization',
'Prepaid expenses',
'Net cash provided by operating activities',
'Net cash provided by investing activities',
'Net cash provided by financing activities',
'Purchase of property, plant and equipment'])].to_numpy()
#print(col_one_list_2)
if res03:
col_one_arr_01 = col_one_list_2
#print("Prepaid. Expenses exists.")
else:
col_one_arr_00 = np.array([['Prepaid expenses', '0']])
col_one_arr_01 = np.append(col_one_list_2,col_one_arr_00, axis=0)
#print("Prepaid. Expenses has been added.")
#print(col_one_arr_01)
col_one_arr_CSCF = np.array(sorted(col_one_arr_01 ,key=lambda x: x[0]))
#print(col_one_arr_BS)
Accounts_receivable = float(col_one_arr_BS[0,1])
Cash_and_cash_equivalents = float(col_one_arr_BS[1,1])
Class_A_and_Class_B_stocks = float(col_one_arr_BS[2,1])
Convertible_stocks = float(col_one_arr_BS[3,1])
Current_assets = float(col_one_arr_BS[4,1])
Current_liabilities = float(col_one_arr_BS[5,1])
Long_term_debt = float(col_one_arr_BS[6,1])
Marketable_securities = float(col_one_arr_BS[7,1])
Prepaid_expenses = float(col_one_arr_BS[8,1])
Property_plant_and_equipment = float(col_one_arr_BS[9,1])
Retained_earnings = float(col_one_arr_BS[10,1])
Short_term_debt = float(col_one_arr_BS[11,1])
Total_assets = float(col_one_arr_BS[12,1])
Total_liabilities = float(col_one_arr_BS[13,1])
Total_stockholder_equity = float(col_one_arr_BS[14,1])
#print(col_one_arr_CSI)
#Consolidated Statement of Income CSI
Basic_EPS = float(col_one_arr_CSI[0,1])
Cost_of_revenues = float(col_one_arr_CSI[1,1])
Diluted_EPS = float(col_one_arr_CSI[2,1])
Income_before_taxes = float(col_one_arr_CSI[3,1])
Adjustment_Payments = float(col_one_arr_CSI[4,1])
Net_income = float(col_one_arr_CSI[5,1])
Operating_income = float(col_one_arr_CSI[6,1])
Research_Development = float(col_one_arr_CSI[7,1])
Revenues = float(col_one_arr_CSI[8,1])
#print(col_one_arr_CSCF)
#Consolidated Statement of Cash Flow CSCF
Net_income_CSCF = float(col_one_arr_CSCF[0,1])
Depreciation = float(col_one_arr_CSCF[1,1])
Amortization = float(col_one_arr_CSCF[2,1])
Prepaid_expenses_CSCF = float(col_one_arr_CSCF[3,1])
Net_cash_operating_activities = float(col_one_arr_CSCF[4,1])
Purchase_of_PPE = float(col_one_arr_CSCF[5,1])
Net_cash_investing_activities = float(col_one_arr_CSCF[6,1])
Net_cash_financing_activities = float(col_one_arr_CSCF[7,1])
#Avg Share Price
file = pd.read_csv('Alphabet Inc.csv')
## TRY STATEMENT FOR date_input-29 -28 -27 -26...
date_prefix = input_date_0[:-8]
date_ls = ["31", "30", "29", "28"]
upd_date_0 = date_prefix + date_ls[0]
upd_date_1 = date_prefix + date_ls[1]
upd_date_2 = date_prefix + date_ls[2]
upd_date_3 = date_prefix + date_ls[3]
#print(upd_date_0)
print('\n')
if upd_date_0 in file.values:
col_one_arr_P = file[file['Date'].str.contains(upd_date_0)].to_numpy()
#print(upd_date_0 + " exists.")
else:
if upd_date_1 in file.values:
col_one_arr_P = file[file['Date'].str.contains(upd_date_1)].to_numpy()
#print("Month finishes in -30")
else:
if upd_date_2 in file.values:
col_one_arr_P = file[file['Date'].str.contains(upd_date_2)].to_numpy()
#print("Month finishes in -29")
else:
if upd_date_3 in file.values:
col_one_arr_P = file[file['Date'].str.contains(upd_date_3)].to_numpy()
#print("Month finishes in -28")
Open_P = col_one_arr_P[0,1]
Close_P = col_one_arr_P[0,-3]
Avg_P = ((Open_P + Close_P)/2)
col_one_arr_P_Modified = np.append(col_one_arr_P, [Avg_P])
col_one_arr_P_Modified
#Saved Number of Shares Outstanding (res13)
def listToString(res13):
str1 = ""
for ele in res13:
str1 += ele
return str1
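# Note: listToString(res13) is equivalent to "".join(res13).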
str00 = listToString(res13)
str01 = str00.replace(",", "")
str02 = str(str01.split("; ",1)[1])[0:6]
Outstanding_shares = int(str02)
#Retained Earnings -3 months prior .calc(CHANGE)
#file.loc[file[input_date_0] == 'Retained earnings']
file = pd.read_csv(input_date_0)
file = file.drop(file.columns[1], axis=1)
file = file.set_axis([input_date_0, 'Default'], axis=1, inplace=False)
col_one_list = file[input_date_0].tolist()
#print(f"\ncol_one_list:\n{col_one_list}\ntype:{type(col_one_list)}")
subs99 = 'Retained earnings'
res99 = [i for i in col_one_list if subs99 in i]
col_one_arr = file.loc[file[input_date_0].isin(['Retained earnings'])].to_numpy()
RE_start = float(col_one_arr[0,1])
#https://www.investopedia.com/ask/answers/012015/how-do-i-calculate-dividend-payout-ratio-income-statement.asp
#Dividends Paid or Payout Ratio => DP = (NI + RE) - RE.close :::::: DP.start = (NI.start + RE.start) - RE.close
#Net Income -3 months prior .calc(CHANGE)
file = pd.read_csv(input_date_1)
file = file.drop(file.columns[3:], axis=1)
file = file.drop(file.columns[1], axis=1)
file = file.set_axis([input_date_1, 'Default'], axis=1, inplace=False)
col_one_arr = file.loc[file[input_date_1].isin(['Net income'])].to_numpy()
NI_start = float(col_one_arr[0,1])
RE_close = Retained_earnings
DP_VALUE = (NI_start + RE_start) - RE_close
#Free Cash Flow FCF - Additional Data from BS & CSCF
file = pd.read_csv(input_date_0)
file = file.drop(file.columns[1], axis=1)
file = file.set_axis([input_date_0, 'Default'], axis=1, inplace=False)
file = file[file['Default'].notna()]
col_one_list = file[input_date_0].tolist()
#print(f"\ncol_one_list:\n{col_one_list}\ntype:{type(col_one_list)}")
subs98 = 'Total current a'
subs97 = 'Total current l'
subs96 = 'Property'
res98 = [i for i in col_one_list if subs98 in i]
res97 = [i for i in col_one_list if subs97 in i]
res96 = [i for i in col_one_list if subs96 in i]
str00 = listToString(res98)
str01 = listToString(res97)
str02 = listToString(res96)
file.replace(str00, "Current assets", inplace=True)
file.replace(str01, "Current liabilities", inplace=True)
file.replace(str02, "Property, plant and equipment", inplace=True)
col_one_arr = file.loc[file[input_date_0].isin(["Current assets"])].to_numpy()
Current_assets_start = float(col_one_arr[0,1])
col_one_arr = file.loc[file[input_date_0].isin(["Current liabilities"])].to_numpy()
Current_liabilities_start = float(col_one_arr[0,1])
col_one_arr = file.loc[file[input_date_0].isin(["Property, plant and equipment"])].to_numpy()
Property_plant_and_equipment_start = float(col_one_arr[0,1])
#For OLD Depreciation Value
file = pd.read_csv(input_date_2)
file = file.drop(file.columns[3:], axis=1)
file = file.drop(file.columns[1], axis=1)
file = file.set_axis([input_date_2, 'Default'], axis=1, inplace=False)
col_one_list = file[input_date_2].tolist()
col_one_list[0] = "Additional Info"
subs95 = 'Depreciation'
res95 = [i for i in col_one_list if subs95 in i]
str03 = listToString(res95)
file.replace(str03, "Depreciation", inplace=True)
col_one_arr = file.loc[file[input_date_2].isin(["Depreciation"])].to_numpy()
Depreciation_start = float(col_one_arr[0,1])
#############################################################################
d = {input_date_0[:-6]: [
# BALANCE SHEET
'Accounts_receivable', #Balance_Sheet
'Cash_and_cash_equivalents', #Balance_Sheet
'Class_A_and_Class_B_stocks', #Balance_Sheet
'Convertible_stocks', #Balance_Sheet
'Outstanding_shares', #Balance_Sheet
'Current_assets', #Balance_Sheet
'Current_liabilities', #Balance_Sheet
'Long_term_debt', #Balance_Sheet
'Marketable_securities', #Balance_Sheet
'Prepaid_expenses', #Balance_Sheet
'Property_plant_and_equipment', #Balance_Sheet
'Retained_earnings', #Balance_Sheet
'Short_term_debt', #Balance_Sheet
'Total_assets', #Balance_Sheet
'Total_liabilities', #Balance_Sheet
'Total_stockholder_equity', #Balance_Sheet
# INCOME STATEMENT
'Basic_EPS', #Income_Statement
'Cost_of_revenues', #Income_Statement
'Diluted_EPS', #Income_Statement
'Income_before_taxes', #Income_Statement
'Adjustment_Payments', #Income_Statement
'Net_income', #Income_Statement
'Operating_income', #Income_Statement
'Research_Development', #Income_Statement
'Revenues', #Income_Statement
# CASH-FLOW STATEMENT
'Net_income_CSCF', #Cash_Flow_Statement
'Depreciation', #Cash_Flow_Statement
'Amortization', #Cash_Flow_Statement
'Prepaid_expenses_CSCF', #Cash_Flow_Statement
'Net_cash_operating_activities', #Cash_Flow_Statement
'Purchase_of_PPE', #Cash_Flow_Statement
'Net_cash_investing_activities', #Cash_Flow_Statement
'Net_cash_financing_activities'], #Cash_Flow_Statement
##########################################################################################
'Financial Variables /unit': [
# BALANCE SHEET
Accounts_receivable, #Balance_Sheet
Cash_and_cash_equivalents, #Balance_Sheet
Class_A_and_Class_B_stocks, #Balance_Sheet
Convertible_stocks, #Balance_Sheet
Outstanding_shares, #Balance_Sheet
Current_assets, #Balance_Sheet
Current_liabilities, #Balance_Sheet
Long_term_debt, #Balance_Sheet
Marketable_securities, #Balance_Sheet
Prepaid_expenses, #Balance_Sheet
Property_plant_and_equipment, #Balance_Sheet
Retained_earnings, #Balance_Sheet
Short_term_debt, #Balance_Sheet
Total_assets, #Balance_Sheet
Total_liabilities, #Balance_Sheet
Total_stockholder_equity, #Balance_Sheet
# INCOME STATEMENT
Basic_EPS, #Income_Statement
Cost_of_revenues, #Income_Statement
Diluted_EPS, #Income_Statement
Income_before_taxes, #Income_Statement
Adjustment_Payments, #Income_Statement
Net_income, #Income_Statement
Operating_income, #Income_Statement
Research_Development, #Income_Statement
Revenues, #Income_Statement
# CASH-FLOW STATEMENT
Net_income_CSCF, #Cash_Flow_Statement
Depreciation, #Cash_Flow_Statement
Amortization, #Cash_Flow_Statement
Prepaid_expenses_CSCF, #Cash_Flow_Statement
Net_cash_operating_activities, #Cash_Flow_Statement
Purchase_of_PPE, #Cash_Flow_Statement
Net_cash_investing_activities, #Cash_Flow_Statement
Net_cash_financing_activities #Cash_Flow_Statement
]}
df= pd.DataFrame(data=d)
print(df)
filename = input_date_0[:-6] + "-Variables-DF.csv"
df.to_csv(filename)
#Ratios Equation & Formula
#############################################################################
#Ratios Equation & Formula
#Price to Earnings
#PE_1_VALUE (Calculate EPS using Net Income & Number of shares issues and outstanding)
#PE_1_VALUE = Avg_P/(Revenues/Class_A_and_Class_B_stocks)
PE_1_VALUE = Avg_P / (Net_income / Outstanding_shares) / 1000 #Units
print(PE_1_VALUE)
#PE_2_VALUE (Calculate PE_2 using Market Price & Diluted EPS [CSI])
PE_2_0_VALUE = Avg_P / Diluted_EPS
print(PE_2_0_VALUE)
PE_2_1_VALUE = Avg_P / Basic_EPS
print(PE_2_1_VALUE)
print('\n')
#Price/Earnings to Growth
#PEG_1_VALUE (Calculate PEG using Expected Growth Rate as input) #print('Enter EGR:')
#MISSING VALUE EGR can only be determined at the present moment using Expected Growth Rate from: Citibank, Goldman Sachs, JPM, MorganStanley, BNP Paribas - Expected EPS Growth
#Other sources include the Wall Street Journal WSJ and Yahoo Finance
#PEG_2_VALUE (Calculate from past 3-6-9-12 months growth rate using linear and non-linear regressions)
#MISSING VALUE Data from the past 3-6-9-12 months
#Other source: https://www.investopedia.com/terms/p/price-earningsratio.asp
#EV and EBITDA-related ratios
EV_1_VALUE = Avg_P * Outstanding_shares
print(EV_1_VALUE)
EV_2_VALUE = Avg_P * Class_A_and_Class_B_stocks
print(EV_2_VALUE)
EBIT_VALUE = Net_income_CSCF #Interest (N.A)
print(EBIT_VALUE)
EBITDA_VALUE = EBIT_VALUE - Depreciation - Amortization
print(EBITDA_VALUE)
EV_1_EBITDA_VALUE = EV_1_VALUE / EBITDA_VALUE
print(EV_1_EBITDA_VALUE)
EV_2_EBITDA_VALUE = EV_2_VALUE / EBITDA_VALUE
print(EV_2_EBITDA_VALUE)
print('\n')
DE_VALUE = (Long_term_debt + Short_term_debt) / EBITDA_VALUE
print(DE_VALUE)
NE_VALUE = (Long_term_debt + Short_term_debt - Cash_and_cash_equivalents) / EBITDA_VALUE
print(NE_VALUE)
QR_1_VALUE = (Cash_and_cash_equivalents + Marketable_securities + Accounts_receivable) / Current_liabilities
print(QR_1_VALUE)
QR_2_VALUE = (Current_assets - Prepaid_expenses) / Current_liabilities
print(QR_2_VALUE)
print('\n')
#Dividend-related ratios
EPS_VALUE = (Net_income - Convertible_stocks) / Outstanding_shares
#https://www.investopedia.com/ask/answers/032515/what-difference-between-earnings-share-and-dividends-share.asp
print(EPS_VALUE)
#Dividends Paid or Payout Ratio => DP = (NI + RE) - RE.close :::::: DP.start = (NI.start + RE.start) - RE.close
#https://www.investopedia.com/ask/answers/012015/how-do-i-calculate-dividend-payout-ratio-income-statement.asp
DPS_VALUE = DP_VALUE / Outstanding_shares
print(DPS_VALUE)
DY_1_VALUE = Net_income / ((RE_close - RE_start) / RE_close) * 4 # for 4 Quarters/Year
print(DY_1_VALUE) #Approximation. Dividend Yield should be calculated within -12 months, not -3months
DY_2_VALUE = DPS_VALUE / Avg_P * 1000 #Units
print(DY_2_VALUE)
Div_VALUE = -(Net_cash_financing_activities)
print(Div_VALUE)
print('\n')
#Other Ratios (MCAP, ROE, Margins...) Verify Ratio Formulas
MCAP_VALUE = Avg_P / Outstanding_shares
print(MCAP_VALUE)
ROE_VALUE = Net_income / Total_stockholder_equity * 4 # or 4 Quarters/Year :::: Other formula(s) exist
print(ROE_VALUE)
NetMargin_VALUE = (Revenues - Cost_of_revenues) / Revenues
print(NetMargin_VALUE)
OperatingMargin_VALUE = Net_cash_operating_activities - Revenues
print(OperatingMargin_VALUE)
print('\n')
#MISSING VALUE: Income before Tax for -6 months (Income_before_taxes is only -3 months). Use current Tax Rate
print(Net_income)
print(Income_before_taxes)
Tax_VALUE = 1 - (Net_income / Income_before_taxes)
print(Tax_VALUE)
FCF_2_VALUE = (Property_plant_and_equipment + (Depreciation/2)) - (Property_plant_and_equipment_start + (Depreciation_start/2)) #CHANGE(CAPEX) also looking at old Depreciation
print(FCF_2_VALUE)
#CHANGE(Assets-Liabilities) - ()#CHANGE(CAPEX)
FCF_0_VALUE = EBIT_VALUE * (1 - Tax_VALUE) - (Amortization/2) - ((Current_assets - Current_liabilities) - (Current_assets_start - Current_liabilities_start)) - (Property_plant_and_equipment - Property_plant_and_equipment_start)
print(FCF_0_VALUE)
FCF_1_VALUE = Revenues - (Amortization/2) - ((Current_assets - Current_liabilities) - (Current_assets_start - Current_liabilities_start)) - (Property_plant_and_equipment - Property_plant_and_equipment_start)
print(FCF_1_VALUE)
#R&D
print(Research_Development)
### USE BOTH 'Outstanding_shares' & 'Class_A_and_Class_B_stocks' FOR RELATED RATIOS?
#############################################################################
# UPDATED :: Ratios Equation & Formula
# () PE_1_VALUE = Avg_P / (Net_income / Outstanding_shares) / 1000
##
PE_2_2_VALUE = Avg_P / EPS_VALUE
#print(PE_2_2_VALUE)
# (CSCF) EBIT_VALUE = Net_income_CSCF && (CSI) EBIT_VALUE_0 = Income_before_taxes
EBIT_VALUE_0 = Income_before_taxes
#print(EBIT_VALUE_0)
# (CSCF) EBITDA_VALUE = EBIT_VALUE - Depreciation - Amortization
EBITDA_VALUE_0 = EBIT_VALUE_0 - Depreciation - Amortization
#print(EBITDA_VALUE_0)
# ()
EV_1_EBITDA_VALUE_0 = EV_1_VALUE / EBITDA_VALUE_0
#print(EV_1_EBITDA_VALUE_0)
EV_2_EBITDA_VALUE_0 = EV_2_VALUE / EBITDA_VALUE_0
#print(EV_2_EBITDA_VALUE_0)
# ()
DE_VALUE_0 = (Long_term_debt + Short_term_debt) / EBITDA_VALUE_0
#print(DE_VALUE_0)
NE_VALUE_0 = (Long_term_debt + Short_term_debt - Cash_and_cash_equivalents) / EBITDA_VALUE_0
#print(NE_VALUE_0)
# ()
#Div_VALUE_0 = Net_income - (EPS_VALUE * Outstanding_shares)
#print(Div_VALUE_0)
Div_VALUE_1 = Net_income - (Basic_EPS * Outstanding_shares)
#print(Div_VALUE_1)
Div_VALUE_2 = Net_income - (Diluted_EPS * Outstanding_shares)
#print(Div_VALUE_2)
#print('\n')
# ()
#DPS_7 = EPS_VALUE * (Div_VALUE / EPS_VALUE)
DPS_00 = EPS_VALUE * (Div_VALUE / Net_income)
#print(DPS_00)
#DPS_01 = EPS_VALUE * (Div_VALUE_0 / Net_income)
#print(DPS_01)
DPS_02 = EPS_VALUE * (Div_VALUE_1 / Net_income)
#print(DPS_02)
DPS_03 = EPS_VALUE * (Div_VALUE_2 / Net_income)
#print(DPS_03)
DPS_10 = Basic_EPS * (Div_VALUE / Net_income)
#print(DPS_10)
#DPS_11 = Basic_EPS * (Div_VALUE_0 / Net_income)
#print(DPS_11)
DPS_12 = Basic_EPS * (Div_VALUE_1 / Net_income)
#print(DPS_12)
DPS_13 = Basic_EPS * (Div_VALUE_2 / Net_income)
#print(DPS_13)
DPS_20 = Diluted_EPS * (Div_VALUE / Net_income)
#print(DPS_20)
#DPS_21 = Diluted_EPS * (Div_VALUE_0 / Net_income)
#print(DPS_21)
DPS_22 = Diluted_EPS * (Div_VALUE_1 / Net_income)
#print(DPS_22)
DPS_23 = Diluted_EPS * (Div_VALUE_2 / Net_income)
#print(DPS_23)
#DPS_30 = Div_VALUE / Outstanding_shares
#print(DPS_30)
#DPS_31 = Div_VALUE_0 / Outstanding_shares
#print(DPS_31)
#DPS_32 = Div_VALUE_1 / Outstanding_shares
#print(DPS_32)
#DPS_33 = Div_VALUE_2 / Outstanding_shares
#print(DPS_33)
#print('\n')
# ()
DY_2_VALUE_00 = DPS_00 / Avg_P #Units
#DY_2_VALUE_01 = DPS_01 / Avg_P
DY_2_VALUE_02 = DPS_02 / Avg_P
DY_2_VALUE_03 = DPS_03 / Avg_P
#print(DY_2_VALUE_00)
#print(DY_2_VALUE_01)
#print(DY_2_VALUE_02)
#print(DY_2_VALUE_03)
DY_2_VALUE_04 = DPS_10 / Avg_P
#DY_2_VALUE_05 = DPS_11 / Avg_P
DY_2_VALUE_06 = DPS_12 / Avg_P
DY_2_VALUE_07 = DPS_13 / Avg_P
#print(DY_2_VALUE_04)
#print(DY_2_VALUE_05)
#print(DY_2_VALUE_06)
#print(DY_2_VALUE_07)
DY_2_VALUE_08 = DPS_20 / Avg_P
#DY_2_VALUE_09 = DPS_21 / Avg_P
DY_2_VALUE_10 = DPS_22 / Avg_P
DY_2_VALUE_11 = DPS_23 / Avg_P
#print(DY_2_VALUE_08)
#print(DY_2_VALUE_09)
#print(DY_2_VALUE_10)
#print(DY_2_VALUE_11)
#DY_2_VALUE_12 = DPS_30 / Avg_P
#DY_2_VALUE_13 = DPS_31 / Avg_P
#DY_2_VALUE_14 = DPS_32 / Avg_P
#DY_2_VALUE_15 = DPS_33 / Avg_P
#print(DY_2_VALUE_12)
#print(DY_2_VALUE_13)
#print(DY_2_VALUE_14)
#print(DY_2_VALUE_15)
#print('\n')
# ()
# Following MCAP uses '*' rather than P/O_S Ratio
#MCAP_VALUE_0 = Outstanding_shares * Avg_P
#print(MCAP_VALUE_0)
# ()
#FCF_0_VALUE = EBIT_VALUE_0 * (1 - Tax_VALUE) - (Amortization/2) - ((Current_assets - Current_liabilities) - (Current_assets_start - Current_liabilities_start)) - (Property_plant_and_equipment - Property_plant_and_equipment_start)
#print(FCF_0_VALUE)
#print('\n')
# ()
### CHECK FOR (Net Income (I/S)) //DONE//
### CHECK FOR [Outstanding_shares] [Dividends] [EPS && DPS] [ADD AND USE ALL SAME VARIABLES OF DIFFERENT VALUE(S)]
# Book Value BV
BV_0 = Total_assets - Total_liabilities
#print(BV_0)
#print('\n')
#GPM_0 = Gross Profit / Sales
GPM_0 = (Revenues - Cost_of_revenues) / Revenues
#print(GPM_0)
# Operating Profit Margin
OPM_0 = Income_before_taxes / Revenues
#print(OPM_0)
# Profit Margin
PM_0 = Net_income / Revenues
#print(PM_0)
# Return on Assets
ROA_0 = Net_income / Total_assets
#print(ROA_0)
# Return on Investments
ROI_0 = Net_income / Total_stockholder_equity
#print(ROI_0)
# Return on Capital
ROC_0 = Net_income / (Total_stockholder_equity + Short_term_debt + Long_term_debt)
#print(ROC_0)
# ADD FORMULAS BELOW
#ROC_1 = (Price [Year] – Price [Previous Year]) / Price [Previous Year]
#print(ROC_1)
#ROC_2 = (Price [Year] – Price [Base Year]) / Price [Base Year]
#print(ROC_2)
# Return on Capital Employed
ROCE_0 = Operating_income / (Total_assets - Current_liabilities)
#print(ROCE_0)
#print('\n')
# Operating Profit
# ADD FORMULA BELOW
#OP_0 = Revenue – Operating Costs – COGS – Other Day-to-day Expenses
#print()
#OP_0 = Income_before_taxes
#print(OP_0)
# Dividend Pay-out
#DPO_0 = Div_VALUE / Net_income
#DPO_1 = Div_VALUE_0 / Net_income
#DPO_2 = Div_VALUE_1 / Net_income
#DPO_3 = Div_VALUE_2 / Net_income
#print(DPO_0)
#print(DPO_1)
#print(DPO_2)
#print(DPO_3)
#print('\n')
# Preferred Stock Dividends
PSD_0 = Net_income - Outstanding_shares
#print(PSD_0)
# Retention Ratio
RR_0 = (Net_income - Div_VALUE) / Net_income
#RR_1 = (Net_income - Div_VALUE_0) / Net_income
RR_2 = (Net_income - Div_VALUE_1) / Net_income
RR_3 = (Net_income - Div_VALUE_2) / Net_income
#print(RR_0)
#print(RR_1)
#print(RR_2)
#print(RR_3)
#print('\n')
# Market Value per Share
MVPS_0 = MCAP_VALUE / Outstanding_shares
#print(MVPS_0)
#MVPS_1 = MCAP_VALUE_0 / Outstanding_shares
#print(MVPS_1)
MVPS_PRICE_0 = MVPS_0 / Avg_P
#print(MVPS_PRICE_0)
#MVPS_PRICE_1 = MVPS_1 / Avg_P
#print(MVPS_PRICE_1)
# Price Earnings
PE_00 = MVPS_0 / EPS_VALUE
#print(PE_00)
PE_01 = MVPS_0 / Basic_EPS
#print(PE_01)
PE_02 = MVPS_0 / Diluted_EPS
#print(PE_02)
#PE_10 = MVPS_1 / EPS_VALUE
#print(PE_10)
#PE_11 = MVPS_1 / Basic_EPS
#print(PE_11)
#PE_12 = MVPS_1 / Diluted_EPS
#print(PE_12)
#print('\n')
# Sustainable Growth Rate
SGR_0 = ROE_VALUE * RR_0
#print(SGR_0)
#SGR_1 = ROE_VALUE * RR_1
#print(SGR_1)
SGR_2 = ROE_VALUE * RR_2
#print(SGR_2)
SGR_3 = ROE_VALUE * RR_3
#print(SGR_3)
#print('\n')
# Book Value per Share
BVPS_0 = Total_stockholder_equity / Outstanding_shares
#print(BVPS_0)
#print('\n')
# Market Book
MB_0 = (MCAP_VALUE / Outstanding_shares) / BVPS_0
#print(MB_0)
#MB_1 = (MCAP_VALUE_0 / Outstanding_shares) / BVPS_0
#print(MB_1)
#MB_2 = Avg_P / BVPS_0
#print(MB_2)
#print('\n')
# New Dividend Yield (DY) Formulas using MVPS
DY_0 = DPS_VALUE / MVPS_0
#print(DY_0)
DY_0_00 = DPS_00 / MVPS_0
#DY_0_01 = DPS_01 / MVPS_0
DY_0_02 = DPS_02 / MVPS_0
DY_0_03 = DPS_03 / MVPS_0
#print(DY_0_00)
#print(DY_0_01)
#print(DY_0_02)
#print(DY_0_03)
DY_0_04 = DPS_10 / MVPS_0
#DY_0_05 = DPS_11 / MVPS_0
DY_0_06 = DPS_12 / MVPS_0
DY_0_07 = DPS_13 / MVPS_0
#print(DY_0_04)
#print(DY_0_05)
#print(DY_0_06)
#print(DY_0_07)
DY_0_08 = DPS_20 / MVPS_0
#DY_0_09 = DPS_21 / MVPS_0
DY_0_10 = DPS_22 / MVPS_0
DY_0_11 = DPS_23 / MVPS_0
#print(DY_0_08)
#print(DY_0_09)
#print(DY_0_10)
#print(DY_0_11)
#DY_0_12 = DPS_30 / MVPS_0
#DY_0_13 = DPS_31 / MVPS_0
#DY_0_14 = DPS_32 / MVPS_0
#DY_0_15 = DPS_33 / MVPS_0
#print(DY_0_12)
#print(DY_0_13)
#print(DY_0_14)
#print(DY_0_15)
#print('\n')
# ()
#DY_1 = DPS_VALUE / MVPS_1
#print(DY_1)
#DY_1_00 = DPS_00 / MVPS_1
#DY_1_01 = DPS_01 / MVPS_1
#DY_1_02 = DPS_02 / MVPS_1
#DY_1_03 = DPS_03 / MVPS_1
#print(DY_1_00)
#print(DY_1_01)
#print(DY_1_02)
#print(DY_1_03)
#DY_1_04 = DPS_10 / MVPS_1
#DY_1_05 = DPS_11 / MVPS_1
#DY_1_06 = DPS_12 / MVPS_1
#DY_1_07 = DPS_13 / MVPS_1
#print(DY_1_04)
#print(DY_1_05)
#print(DY_1_06)
#print(DY_1_07)
#DY_1_08 = DPS_20 / MVPS_1
#DY_1_09 = DPS_21 / MVPS_1
#DY_1_10 = DPS_22 / MVPS_1
#DY_1_11 = DPS_23 / MVPS_1
#print(DY_1_08)
#print(DY_1_09)
#print(DY_1_10)
#print(DY_1_11)
#DY_1_12 = DPS_30 / MVPS_1
#DY_1_13 = DPS_31 / MVPS_1
#DY_1_14 = DPS_32 / MVPS_1
#DY_1_15 = DPS_33 / MVPS_1
#print(DY_1_12)
#print(DY_1_13)
#print(DY_1_14)
#print(DY_1_15)
#print('\n')
Equity_multiplier = Total_assets / Total_stockholder_equity
#print(Equity_multiplier)
Total_asset_turnover = Revenues / Total_assets
#print(Total_asset_turnover)
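# Hedged consistency check (illustration only, not part of the original workflow):
# the DuPont identity gives ROE = profit margin x asset turnover x equity multiplier.
# ROE_VALUE above is annualised by a factor of 4, so compare against ROE_VALUE / 4.
DuPont_ROE_quarterly = PM_0 * Total_asset_turnover * Equity_multiplier
#print(DuPont_ROE_quarterly, ROE_VALUE / 4)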
# · Apply YOY, HA && VA Analysis
## TO BE CONTINUED (DEFINE SINGLE VARIABLES)
## ADD LIQUIDITY [LEVERAGE], SOLVENCY [LEVERAGE] AND EFFICIENCY RATIO [BS] [CSI]
#Liquidity Ratio [BS] [CSI]
#Current Ratio = Current Assets / Current Liabilities
Cur_R = Current_assets / Current_liabilities
print(Cur_R)
#Quick Ratio = (Current Assets - Inventory) / Current Liabilities
Inventory = Total_assets - Cash_and_cash_equivalents - Accounts_receivable
QR = (Current_assets - Inventory) / Current_liabilities
print(QR)
#Cash Ratio = Cash / Current Liabilities
Cas_R = Cash_and_cash_equivalents / Current_liabilities
print(Cas_R)
#Net Worth Capital (NWC) = Current Assets - Current Liabilities
NWC = Current_assets - Current_liabilities
print(NWC)
#NWC to Total Assets = NWC / Total Assets
NWC_TA = NWC / Total_assets
print(NWC_TA)
#Interval Measure = Current Assets / Average Daily Operating Costs, where Average Daily Operating Costs ~ (COGS + Operating Expenses) / 365
IM = Current_assets / ((Cost_of_revenues + Operating_income) / 365)  # Operating_income stands in for operating expenses here
print(IM)
print('\n')
#Solvency Ratios [BS] [CSI]
#Debt Ratio (DR) = Total Debt / Total Assets
Total_debt = Short_term_debt + Long_term_debt
DR = Total_debt / Total_assets
print(DR)
#Equity Ratio = Total Equity / Total Assets
ER = Total_stockholder_equity / Total_assets
print(ER)
#Debt-to-Equity Ratio = Total Debt / Total Equity
DER = Total_debt / Total_stockholder_equity
print(DER)
#Equity Multiplier (Financial Leverage) = Total Assets / Total Equity
EM = Total_assets / Total_stockholder_equity
print(EM)
#Fixed to Equity = Net Fixed Assets / Total Equity
NFA = Total_assets - Current_assets
FE = NFA / Total_stockholder_equity
print(FE)
#Long-Term Debt Ratio = Long-Term Debt / (Long-Term Debt + Total Equity)
LTDR = Long_term_debt / (Long_term_debt + Total_stockholder_equity)
print(LTDR)
#Times Interest Earned (Interest Coverage) = EBIT / Interest
Interest = - (Operating_income - Income_before_taxes)
TIE = Operating_income / Interest
print(TIE)
#Cash Coverage = (EBIT + Depreciation) / Interest
CC = (Operating_income + Depreciation) / Interest
print(CC)
#Debt Service Coverage Ratio = Operating Income / Total Debt Service, where Total Debt Service = Interest * (1 - Tax Rate) + Principal
Principal = Current_liabilities - Short_term_debt
TDS = (Interest * (1 - Tax_VALUE)) + Principal
DSCR = Operating_income / TDS
print(DSCR)
print('\n')
#Efficiency Ratios [BS] [CSI]
#Inventory Turnover = COGS / Inventory
IT = Cost_of_revenues / Inventory
print(IT)
#Day’s Sales in Inventory (Days Inventory Outstanding, or DIO) = 365 / Inventory Turnover
DSI = 365 / IT
print(DSI)
#Receivables Turnover = Sales / Accounts Receivables
Sales = Revenues - Operating_income - Cost_of_revenues - Research_Development
RT = Sales / Accounts_receivable
print(RT)
#Day’s Sales in Receivables (Days Sales Outstanding, or DSO) = 365 / Receivables Turnover
DSR = 365 / RT
print(DSR)
#Payables Turnover = COGS / Accounts Payable
# Retrieve Accounts Payable from [BS]
#PT = Cost_of_revenues / (...)
#Day’s Sales in Payables (Days Payables Outstanding, or DPO) = 365 / Payables Turnover
#Cash Conversion Cycle (CCC) = Day’s Sales in Inventory + Day’s Sales in Receivables - Day’s Sales in Payables
#Total Asset Turnover (TATO) = Sales / Total Assets
TATO = Sales / Total_assets
print(TATO)
#Capital Intensity = Total Assets / Sales
CI = Total_assets / Sales
print(CI)
#Fixed Asset Turnover = Sales / Net Fixed Assets
FAT = Sales / NFA
print(FAT)
#NWC Turnover = Sales / NWC
NWCT = Sales / NWC
print(NWCT)
## NOTES
# Separate previous RATIOS into two sections: PROFITABILITY and MARKET VALUE RATIOS.
# Separate new RATIOS into two or three sub-sections.
# Loop over all data
# VERIFY ALL PROCESSED DATA (e.g. Date[1], Date[n-1], Date[n])
# Start using PMP Course to write BUSINESS PLAN & STRATEGY intended for new STAKEHOLDERS.
# Finish building prototype for all 10 companies.
EBIT_DEFAULT = Operating_income
print(EBIT_DEFAULT) #Market Value
EBITDA_DEFAULT = EBIT_DEFAULT - Depreciation - Amortization
print(EBITDA_DEFAULT) #Market Value
# ()
EV_1_EBITDA_DEFAULT = EV_1_VALUE / EBITDA_DEFAULT
print(EV_1_EBITDA_DEFAULT) #Market Value
EV_2_EBITDA_DEFAULT = EV_2_VALUE / EBITDA_DEFAULT
print(EV_2_EBITDA_DEFAULT) #Market Value
print('\n')
# ()
DE_DEFAULT = (Long_term_debt + Short_term_debt) / EBITDA_DEFAULT
print(DE_DEFAULT) #Solvency [Leverage]
NE_DEFAULT = (Long_term_debt + Short_term_debt - Cash_and_cash_equivalents) / EBITDA_DEFAULT
print(NE_DEFAULT) #Solvency [Leverage]
FCF_3_VALUE = EBIT_VALUE_0 * (1 - Tax_VALUE) - (Amortization/2) - ((Current_assets - Current_liabilities) - (Current_assets_start - Current_liabilities_start)) - (Property_plant_and_equipment - Property_plant_and_equipment_start)
print(FCF_3_VALUE) #Solvency [Leverage]
FCF_DEFAULT = EBIT_DEFAULT * (1 - Tax_VALUE) - (Amortization/2) - ((Current_assets - Current_liabilities) - (Current_assets_start - Current_liabilities_start)) - (Property_plant_and_equipment - Property_plant_and_equipment_start)
print(FCF_DEFAULT) #Solvency [Leverage]
### USE BOTH 'Outstanding_shares' & 'Class_A_and_Class_B_stocks' FOR RELATED RATIOS?
#print('\n')
d = {input_date_0[:-6]: [
# PROFITABILITY
'GPM_0', #Profitability
'OPM_0', #Profitability
'OPER_MARGIN_VALUE', #Profitability
'PM_0', #Profitability
'ROA_0', #Profitability
'ROI_0', #Profitability
'ROE_VALUE', #Profitability
'ROC_0', #Profitability
'ROCE_0', #Profitability
# MARKET VALUE MEASURES
'EV_1_VALUE', #Market Value
'EV_2_VALUE', #Market Value
'EBIT_DEFAULT', ##############################
'EBIT_VALUE', #Market Value
'EBIT_VALUE_0', #Market Value
'EBITDA_DEFAULT', ##############################
'EBITDA_VALUE', #Market Value
'EBITDA_VALUE_0', #Market Value
'EV_1_EBITDA_VALUE', #Market Value
'EV_2_EBITDA_VALUE', #Market Value
'EV_1_EBITDA_VALUE_0', #Market Value
'EV_2_EBITDA_VALUE_0', #Market Value
'EV_1_EBITDA_DEFAULT', ##############################
'EV_2_EBITDA_DEFAULT', ##############################
'EPS_VALUE', #Market Value
'DPS_VALUE', #Market Value
'DPS_00', #Market Value
'DPS_02', #Market Value
'DPS_03', #Market Value
'DPS_10', #Market Value
'DPS_12', #Market Value
'DPS_13', #Market Value
'DPS_20', #Market Value
'DPS_22', #Market Value
'DPS_23', #Market Value
'PE_1_VALUE', #Market Value
'PE_2_0_VALUE', #Market Value
'PE_2_1_VALUE', #Market Value
'PE_00', #Market Value
'PE_01', #Market Value
'PE_02', #Market Value
############################## MISSING DPO ##############################
'RR_0', #Market Value
'RR_2', #Market Value
'RR_3', #Market Value
'DY_1_VALUE', #Market Value
'DY_2_VALUE', #Market Value
'DY_2_VALUE_00', #Market Value
'DY_2_VALUE_02', #Market Value
'DY_2_VALUE_03', #Market Value
'DY_2_VALUE_04', #Market Value
'DY_2_VALUE_06', #Market Value
'DY_2_VALUE_07', #Market Value
'DY_2_VALUE_08', #Market Value
'DY_2_VALUE_10', #Market Value
'DY_2_VALUE_11', #Market Value
'DY_0', #Market Value
'DY_0_00', #Market Value
'DY_0_02', #Market Value
'DY_0_03', #Market Value
'DY_0_04', #Market Value
'DY_0_06', #Market Value
'DY_0_07', #Market Value
'DY_0_08', #Market Value
'DY_0_10', #Market Value
'DY_0_11', #Market Value
'Div_VALUE', #Market Value
'Div_VALUE_1', #Market Value
'Div_VALUE_2', #Market Value
'PSD_0', #Market Value
'MCAP_VALUE', #Market Value
'BV_0', #Market Value
'MB_0', #Market Value
'MVPS_0', #Market Value
'MVPS_PRICE_0', #Market Value
'BVPS_0', #Market Value
'SGR_0', #Market Value
'SGR_2', #Market Value
'SGR_3', #Market Value
'RD_VALUE', #Market Value
# LEVERAGE
# i) LIQUIDITY RATIOS
'Cur_R_VALUE', #Liquidity [Leverage]
'QR_1_VALUE', #Liquidity [Leverage]
'QR_2_VALUE', #Liquidity [Leverage]
'QR_VALUE', #Liquidity [Leverage]
'Cas_R_VALUE', #Liquidity [Leverage]
'NWC_VALUE', #Liquidity [Leverage]
'NWC_TA_VALUE', #Liquidity [Leverage]
'IM_VALUE', #Liquidity [Leverage]
# ii) SOLVENCY RATIOS
'DE_VALUE', #Solvency [Leverage]
'DE_VALUE_0', #Solvency [Leverage]
'NE_VALUE_0', #Solvency [Leverage]
'DE_DEFAULT', ##############################
'NE_DEFAULT', ##############################
'DR_VALUE', #Solvency [Leverage]
'ER_VALUE', #Solvency [Leverage]
'DER_VALUE', #Solvency [Leverage]
'EM_VALUE', #Solvency [Leverage]
'FE_VALUE', #Solvency [Leverage]
'LTDR_VALUE', #Solvency [Leverage]
'TIE_VALUE', #Solvency [Leverage]
'CC_VALUE', #Solvency [Leverage]
'DSCR_VALUE', #Solvency [Leverage]
'EQUITY_MULTIPLIER', #Solvency [Leverage]
'Tax_VALUE', #Solvency [Leverage]
'FCF_0_VALUE', #Solvency [Leverage]
'FCF_1_VALUE', #Solvency [Leverage]
'FCF_2_VALUE', #Solvency [Leverage]
'FCF_3_VALUE', ##############################
'FCF_DEFAULT', ##############################
# EFFICIENCY
'IT_VALUE', #Efficiency
'DSI_VALUE', #Efficiency
'RT_VALUE', #Efficiency
'DSR_VALUE', #Efficiency
'TOT_ASSET_TURNOVER', #Efficiency
'TATO_VALUE', #Efficiency
'CI_VALUE', #Efficiency
'FAT_VALUE', #Efficiency
'NWCT_VALUE'], #Efficiency
##########################################################################################
'Financial Ratio /unit': [
# PROFITABILITY
GPM_0, #Profitability
OPM_0, #Profitability
OperatingMargin_VALUE, #Profitability
PM_0, #Profitability
ROA_0, #Profitability
ROI_0, #Profitability
ROE_VALUE, #Profitability
ROC_0, #Profitability
ROCE_0, #Profitability
# MARKET VALUE MEASURES
EV_1_VALUE, #Market Value
EV_2_VALUE, #Market Value
EBIT_DEFAULT, ##############################
EBIT_VALUE, #Market Value
EBIT_VALUE_0, #Market Value
EBITDA_DEFAULT, ##############################
EBITDA_VALUE, #Market Value
EBITDA_VALUE_0, #Market Value
EV_1_EBITDA_VALUE, #Market Value
EV_2_EBITDA_VALUE, #Market Value
EV_1_EBITDA_VALUE_0, #Market Value
EV_2_EBITDA_VALUE_0, #Market Value
EV_1_EBITDA_DEFAULT, ##############################
EV_2_EBITDA_DEFAULT, ##############################
EPS_VALUE, #Market Value
DPS_VALUE, #Market Value
DPS_00, #Market Value
DPS_02, #Market Value
DPS_03, #Market Value
DPS_10, #Market Value
DPS_12, #Market Value
DPS_13, #Market Value
DPS_20, #Market Value
DPS_22, #Market Value
DPS_23, #Market Value
PE_1_VALUE, #Market Value
PE_2_0_VALUE, #Market Value
PE_2_1_VALUE, #Market Value
PE_00, #Market Value
PE_01, #Market Value
PE_02, #Market Value
############################## MISSING DPO ##############################
RR_0, #Market Value
RR_2, #Market Value
RR_3, #Market Value
DY_1_VALUE, #Market Value
DY_2_VALUE, #Market Value
DY_2_VALUE_00, #Market Value
DY_2_VALUE_02, #Market Value
DY_2_VALUE_03, #Market Value
DY_2_VALUE_04, #Market Value
DY_2_VALUE_06, #Market Value
DY_2_VALUE_07, #Market Value
DY_2_VALUE_08, #Market Value
DY_2_VALUE_10, #Market Value
DY_2_VALUE_11, #Market Value
DY_0, #Market Value
DY_0_00, #Market Value
DY_0_02, #Market Value
DY_0_03, #Market Value
DY_0_04, #Market Value
DY_0_06, #Market Value
DY_0_07, #Market Value
DY_0_08, #Market Value
DY_0_10, #Market Value
DY_0_11, #Market Value
Div_VALUE, #Market Value
Div_VALUE_1, #Market Value
Div_VALUE_2, #Market Value
PSD_0, #Market Value
MCAP_VALUE, #Market Value
BV_0, #Market Value
MB_0, #Market Value
MVPS_0, #Market Value
MVPS_PRICE_0, #Market Value
BVPS_0, #Market Value
SGR_0, #Market Value
SGR_2, #Market Value
SGR_3, #Market Value
Research_Development, #Market Value
# LEVERAGE
# i) LIQUIDITY RATIOS
Cur_R, #Liquidity [Leverage]
QR_1_VALUE, #Liquidity [Leverage]
QR_2_VALUE, #Liquidity [Leverage]
QR, #Liquidity [Leverage]
Cas_R, #Liquidity [Leverage]
NWC, #Liquidity [Leverage]
NWC_TA, #Liquidity [Leverage]
IM, #Liquidity [Leverage]
# ii) SOLVENCY RATIOS
DE_VALUE, #Solvency [Leverage]
DE_VALUE_0, #Solvency [Leverage]
NE_VALUE_0, #Solvency [Leverage]
DE_DEFAULT, ##############################
NE_DEFAULT, ##############################
DR, #Solvency [Leverage]
ER, #Solvency [Leverage]
DER, #Solvency [Leverage]
EM, #Solvency [Leverage]
FE, #Solvency [Leverage]
LTDR, #Solvency [Leverage]
TIE, #Solvency [Leverage]
CC, #Solvency [Leverage]
DSCR, #Solvency [Leverage]
Equity_multiplier, #Solvency [Leverage]
Tax_VALUE, #Solvency [Leverage]
FCF_0_VALUE, #Solvency [Leverage]
FCF_1_VALUE, #Solvency [Leverage]
FCF_2_VALUE, #Solvency [Leverage]
FCF_3_VALUE, ##############################
FCF_DEFAULT, ##############################
# EFFICIENCY
IT, #Efficiency
DSI, #Efficiency
RT, #Efficiency
DSR, #Efficiency
Total_asset_turnover, #Efficiency
TATO, #Efficiency
CI, #Efficiency
FAT, #Efficiency
NWCT #Efficiency
]}
df= pd.DataFrame(data=d)
#print(df)
filename = input_date_0[:-6] + "-Ratio-DF.csv"
df.to_csv(filename)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tables
======
The table generation process uses a :class:`TableGenerator` to gather all the
cells of the table. The methods :meth:`TableGenerator.get_table` and
:meth:`TableGenerator.get_long_table` return the flowable objects to add to
the story.
Those methods take a list of styles, used to create a
:class:`reportlab.platypus.TableStyle` object for the table. Those styles
are tuples as defined by :mod:`reportlab`.
The :class:`Styles` object and its default instantiation :data:`styles` are
shortcuts to :mod:`reportlab` table styles in a less obnoxious interface.
Example:
>>> gen = TableGenerator()
>>> gen.append([
... 'Column 1',
... 'Column 2',
... ])
>>> gen.extend(datas)
>>> story.append(gen.get_long_table(
... styles.first_row.TextColor(colors.red),
... styles.first_col.Alignment('RIGHT'),
... ))
.. data:: styles
A shortcut to create :mod:`reportlab` table styles. All attributes of
styles objects correspond to table style directives and the arguments are
the parameters of those table styles.
Arguments are concatenated and may be applied before or after setting the
directive name.
Those 4 directives are equivalent:
>>> styles((-1, 0), (-1, -1), 1, colors.black).Grid()
>>> styles.Grid((-1, 0), (-1, -1), 1, colors.black)
>>> styles((-1, 0), (-1, -1)).Grid(1, colors.black)
>>> ('GRID', (-1, 0), (-1, -1), 1, colors.black)
This allows creating predefined sections of the grid and applying different
styles to those sections.
The sections **first_row**, **first_col**, **last_row**, **last_col** are
predefined styles for respectively the first row, the first column, the
last row and the last column. **first_rows**, **last_rows**,
**first_cols**, **last_cols** are functions matching the first and last columns
or rows up to the given value.
Those 3 directives are equivalent:
>>> styles((-2, 0), (-1, -1)).Background(colors.red)
>>> styles.last_cols(2).Background(colors.red)
>>> ('BACKGROUND', (-2, 0), (-1, -1), colors.red)
The **all** attribute is a shortcut to a section covering the whole table.
A special **styles.grid** style is defined, mainly meant for quick
debugging rather than for production use. It draws a black grid
on each cell of the table.
"""
from __future__ import absolute_import
try:  # Python 3.3+
    from collections.abc import MutableSequence
except ImportError:  # Python 2 fallback
    from collections import MutableSequence
from reportlab.lib import colors
from reportlab.platypus import (
Table,
LongTable,
TableStyle as RootTableStyle,
)
__all__ = [
'TableGenerator',
'TableStyle',
'styles'
]
class TableGenerator(MutableSequence):
"""
A Generator of :class:`Table` and :class:`LongTable`.
This object is a mutable sequence and supports all the access as a list to
add values.
"""
def __init__(self, *styles, **kw):
self.content = list()
style = kw.pop('style', None)
if styles:
style = RootTableStyle(styles, parent=style)
self.base_style = style
self.default_kw = kw
def __len__(self):
return len(self.content)
def __iter__(self):
return iter(self.content)
def __setitem__(self, index, value):
self.content[index] = value
def __getitem__(self, index):
return self.content[index]
def __delitem__(self, index):
del self.content[index]
def insert(self, index, value):
self.content.insert(index, value)
def _build(self, cls, style, kw):
if style:
style = RootTableStyle(style, parent=self.base_style)
else:
style = self.base_style
h_align = kw.pop('hAlign', None)
keywords = dict(self.default_kw)
kw['style'] = style
keywords.update(kw)
table = cls(self.content, **keywords)
if h_align is not None:
table.hAlign = h_align
return table
def get_table(self, *style, **kw):
"""
Returns the table as a :class:`reportlab.platypus.Table`
"""
return self._build(Table, style, kw)
def get_long_table(self, *style, **kw):
"""
Returns the table as a :class:`reportlab.platypus.LongTable`.
The :class:`LongTable` is recommended for bigger tables.
"""
return self._build(LongTable, style, kw)
class FormattedTableGenerator(TableGenerator):
"""
:class:`TableGenerator` that formats the rows appended with format strings.
**formats** is a list of ``None`` or template strings. ``None`` means that
the value is copied as given. Template strings are format strings
compatible with the builtin :func:`format` function.
>>> ftg = FormattedTableGenerator([None, '.2f', '.0%'])
>>> ftg.append_raw(['Name', 'Value', 'Rate'])
>>> ftg.append(['value1', 513.492, 0.03])
>>> ftg.append(['value2', 1016.2, 0.43])
>>> ftg.get_table()
Giving::
Name | Value | Rate
value1 | 513.49 | 3%
value2 | 1016.20 | 43%
"""
def __init__(self, formats, *args, **kw):
super(FormattedTableGenerator, self).__init__(*args, **kw)
self.formats = formats
def append_raw(self, values):
super(FormattedTableGenerator, self).append(values)
def append(self, values):
formatted = [v if f is None else format(v, f)
for (f, v) in zip(self.formats, values)]
super(FormattedTableGenerator, self).append(formatted)
class TableStyle(RootTableStyle, object):
def __init__(self, *args):
super(TableStyle, self).__init__(args)
class Styles(object):
known_styles = {
# borders
'grid': 4,
'box': 4,
'linebelow': 4,
'lineafter': 4,
'background': 3,
'valign': 3,
'textcolor': 3,
'span': 2,
}
def __init__(self, *args):
self.args = args
def __repr__(self):
return 'Styles({})'.format(', '.join(repr(x) for x in self.args))
def __getattr__(self, name):
return CellsStyle(name, self.known_styles.get(name.lower()), self.args)
def __eq__(self, other):
if isinstance(other, Styles):
other = other.args
elif not isinstance(other, (tuple, list)):
return NotImplemented
return self.args == other
class CellsStyle(object):
def __init__(self, name, argc, args):
self.name = name.upper()
self.argc = argc
self.args = args
def __repr__(self):
return '{}({})'.format(self.name.capitalize(), self.args)
def __call__(self, *args):
args = self.args + args
if self.argc is not None and len(args) != self.argc:
raise TypeError('{2}: Expecting {0} arguments, got {1}'.format(
self.argc, len(args), self.name))
return (self.name, ) + args
def __eq__(self, other):
return (isinstance(other, CellsStyle) and
self.name == other.name and
self.args == other.args)
class AllStyles(Styles):
@property
def grid(self):
return self.all.Grid(1, colors.black)
@property
def all(self):
return self.cols(0, -1)
@property
def first_row(self):
return self.rows(0, 0)
def first_rows(self, n):
return self.rows(0, n)
def row(self, n):
return self.rows(n, n)
def rows(self, first, last):
return Styles((0, first), (-1, last))
@property
def last_row(self):
return self.rows(-1, -1)
def last_rows(self, n):
return self.rows(-n, -1)
@property
def first_col(self):
return self.cols(0, 0)
def first_cols(self, n):
return self.cols(0, n)
def col(self, n):
return self.cols(n, n)
def cols(self, first, last):
return Styles((first, 0), (last, -1))
@property
def last_col(self):
return self.cols(-1, -1)
def last_cols(self, n):
return self.cols(-n, -1)
styles = AllStyles()
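# Minimal usage sketch (illustrative; "story" and "datas" are assumed to be a
# reportlab story list and an iterable of rows, as in the module docstring):
#
#     gen = TableGenerator(styles.grid)
#     gen.append(['Name', 'Value'])
#     gen.extend(datas)
#     story.append(gen.get_long_table(
#         styles.first_row.Background(colors.lightgrey),
#     ))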
|
from __future__ import absolute_import
import io
import json
import os
import subprocess
import sys
import uuid
from . import __version__
from .support import Popen
class CredentialProvider(object):
def __init__(self):
if sys.platform.startswith("win"):
self.exe = [
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"plugins",
"netfx",
"CredentialProvider.Microsoft",
"CredentialProvider.Microsoft.exe",
)
]
else:
try:
from dotnetcore2.runtime import get_runtime_path
except ImportError:
get_runtime_path = lambda: "dotnet"
self.exe = [
get_runtime_path(),
"exec",
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"plugins",
"netcore",
"CredentialProvider.Microsoft",
"CredentialProvider.Microsoft.dll",
),
]
self.proc = p = Popen(
self.exe + ["-P"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
inputs = io.TextIOWrapper(p.stdout, "utf-8", "replace")
self._in = (json.loads(d) for d in inputs)
self._messages = {}
def _kill(self):
p, self.proc = self.proc, None
if p:
try:
p.kill()
except OSError:
pass
def _read1(self, method):
pending = self._messages.get(method)
if pending:
return pending.pop(0)
for msg in self._in:
if msg["Type"] == "Progress":
self.send(**msg)
continue
if msg["Method"] == method:
return msg
self._messages.setdefault(msg["Method"], []).append(msg)
def _read(self, method):
msg = self._read1(method)
if msg and msg["Type"] == "Fault":
raise RuntimeError(msg["Payload"].get("Message", msg))
return msg
def __enter__(self):
if not self.proc:
raise RuntimeError("process already terminated")
if not self.handshake():
self._kill()
raise RuntimeError("failed to complete handshake")
if not self.initialize():
self._kill()
raise RuntimeError("failed to initialize")
return self
def __exit__(self, ex_type, ex_value, ex_tb):
self.send(Method="Close", Type="Request")
def send(self, **kwargs):
kwargs.setdefault("RequestId", str(uuid.uuid4()))
data = (json.dumps(kwargs) + "\r\n").encode("utf-8", "strict")
p = self.proc
if p:
p.stdin.write(data)
p.stdin.flush()
def reply(self, req, **kwargs):
return self.send(
RequestId=req["RequestId"],
Method=req["Method"],
Type="Response",
Payload=kwargs,
)
def handshake(self):
while True:
req = self._read("Handshake")
if not req:
return
if req["Type"] == "Request":
protocol = req["Payload"]["MinimumProtocolVersion"]
if protocol.startswith(("1.", "2.")):
self.reply(req, ResponseCode="Success", ProtocolVersion="2.0.0")
return True
else:
                    self.reply(req, ResponseCode="Error")
raise RuntimeError("Cannot negotiate protocol")
if req["Type"] == "Response":
if req["Payload"]["ResponseCode"] == "Success":
return True
raise RuntimeError(req["Payload"])
raise RuntimeError(req)
def initialize(self):
self.send(
Type="Request",
Method="Initialize",
Payload=dict(
clientVersion=__version__, culture="en-US", requestTimeout="00:00:05.00"
),
)
while True:
req = self._read("Initialize")
if req is None:
raise RuntimeError("failed to initialize")
if req["Type"] == "Response":
if req["Payload"]["ResponseCode"] == "Success":
return True
raise RuntimeError(req["Payload"])
raise RuntimeError(req)
def get_credentials(self, url, allow_prompt=False):
self.send(
Type="Request",
Method="GetAuthenticationCredentials",
Payload=dict(
uri=url,
IsRetry=False,
IsNonInteractive=not allow_prompt,
CanShowDialog=allow_prompt,
),
)
while True:
req = self._read("GetAuthenticationCredentials")
if not req:
raise RuntimeError("failed to get credentials")
elif req["Type"] == "Response":
payload = req["Payload"]
if payload["ResponseCode"] == "Success":
return payload["Username"], payload["Password"]
elif payload["ResponseCode"] == "NotFound":
return None, None
else:
raise RuntimeError(payload["ResponseCode"])
            else:
                raise RuntimeError(req)
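# Hedged usage sketch (the feed URL is a placeholder, not a real endpoint):
#
#     with CredentialProvider() as provider:
#         username, password = provider.get_credentials(
#             "https://pkgs.example.com/org/feed/pypi/simple/",
#             allow_prompt=False,
#         )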
|
"""Build the desired software environment"""
import sys
import os
import os.path
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '../..')))
from scripts.authentication.service_principal import ws
def build_env():
# Conda dependencies
conda_dep = CondaDependencies()
conda_dep.add_pip_package("tensorflow")
conda_dep.add_pip_package("pandas-datareader")
conda_dep.add_pip_package("scikit-learn")
conda_dep.add_pip_package("python-dotenv")
conda_dep.add_pip_package("matplotlib")
# Env specifications
baseline_env = Environment(name="model_env")#, file_path='./requirements.txt')
baseline_env.python.conda_dependencies=conda_dep
#baseline_env = Environment.from_pip_requirements(name="for_some", file_path='./requirements.txt')
baseline_env.docker.base_image = None
baseline_env.docker.base_dockerfile = "./scripts/setup/Dockerfile"
# Register the environment
baseline_env.register(workspace=ws)
# Build the env
build = baseline_env.build(workspace=ws)
build.wait_for_completion(show_output=True)
if __name__ == "__main__":
build_env()
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function
import numpy as np
from network import *
from loss import *
from data import DataGenerator
import match
class UniNet:
def __init__(
self,
input_w=512,
input_h=64,
train_txt='./dataset/Train/NDtrain.txt',
evaL_txt='./dataset/Train/NDeval.txt',
snapshots='./snapshots/',
alpha=0.2,
epoch=1000,
batch_size=90,
people_per_batch=45,
imgs_per_person=40,
lr=1e-3,
lr_decay=1e-6
):
self.input_w = input_w
self.input_h = input_h
self.train_txt = train_txt
self.eval_txt = evaL_txt
self.snapshots = snapshots
self.alpha = alpha
self.epoch = epoch
self.batch_size = batch_size
self.people_per_batch = people_per_batch
self.imgs_per_person = imgs_per_person
self.lr = lr
self.lr_decay = lr_decay
''' calculate the gradient: ETL/fp, ETL/fa, ETL/fn '''
def get_grad(self, loss, fp, fa, fn, fp_mask, fa_mask, fn_mask, b_AP, b_AN):
if loss == 0.:
return torch.zeros_like(fp), torch.zeros_like(fa), torch.zeros_like(fn)
batch_size = fp.shape[0]
zero = torch.tensor(0.).to(self.device)
fp_mask_offset = torch.zeros_like(fp_mask)
fn_mask_offset = torch.zeros_like(fn_mask)
fa_b_AP = torch.zeros_like(fa)
fa_b_AN = torch.zeros_like(fa)
''' shifted fp, fp with -b_AP and -b_AN '''
fp_AP_b = torch.zeros_like(fp)
fn_AN_b = torch.zeros_like(fn)
for i in range(batch_size):
fp_mask_offset[i, :, :] = match.shiftbits_torch(fp_mask[i], b_AP[i])
fa_b_AP[i, 0, :, :] = match.shiftbits_torch(fa[i, 0, :, :], b_AP[i])
fp_AP_b[i, 0, :, :] = match.shiftbits_torch(fp[i, 0, :, :], -b_AP[i])
fn_mask_offset[i, :, :] = match.shiftbits_torch(fn_mask[i], b_AN[i])
fa_b_AN[i, 0, :, :] = match.shiftbits_torch(fa[i, 0, :, :], b_AN[i])
fn_AN_b[i, 0, :, :] = match.shiftbits_torch(fn[i, 0, :, :], -b_AN[i])
M_ap = (fa_mask == fp_mask_offset) & (fp_mask_offset == 1.)
M_an = (fa_mask == fn_mask_offset) & (fn_mask_offset == 1.)
norm_M_ap = torch.sum(M_ap, dim=[1, 2])
norm_M_an = torch.sum(M_an, dim=[1, 2])
norm_M_ap = norm_M_ap.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
norm_M_an = norm_M_an.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
grad_etl2fp = 2. * (fa_b_AP - fp) / (batch_size * norm_M_ap)
grad_etl2fn = 2. * (fa_b_AN - fn) / (batch_size * norm_M_an)
grad_etl2fp_AP_b = - 2. * (fa_b_AP - fp_AP_b) / (batch_size * norm_M_ap)
grad_etl2fn_AN_b = 2. * (fa_b_AN - fn_AN_b) / (batch_size * norm_M_an)
grad_etl2fp_ = grad_etl2fp.clone()
grad_etl2fn_ = grad_etl2fn.clone()
grad_etl2fp_AP_b_ = grad_etl2fp_AP_b.clone()
grad_etl2fn_AN_b_ = grad_etl2fn_AN_b.clone()
for i in range(batch_size):
            grad_etl2fp_[i] = torch.where(M_ap[i], grad_etl2fp[i], zero)
            grad_etl2fp_AP_b_[i] = torch.where(M_ap[i], grad_etl2fp_AP_b[i], zero)
            grad_etl2fn_[i] = torch.where(M_an[i], grad_etl2fn[i], zero)
            grad_etl2fn_AN_b_[i] = torch.where(M_an[i], grad_etl2fn_AN_b[i], zero)
grad_etl2fa_ = grad_etl2fp_AP_b_ + grad_etl2fn_AN_b_
return grad_etl2fp_, grad_etl2fa_, grad_etl2fn_
def train(self):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.featnet = FeatNet()
self.featnet = torch.nn.DataParallel(self.featnet)
self.featnet.to(self.device)
self.etl_loss = ETLoss(self.alpha, self.device)
''' set data generator '''
self.train_dataGenerator = DataGenerator(
txt=self.train_txt,
batch_size=self.batch_size,
people_per_batch=self.people_per_batch,
imgs_per_person=self.imgs_per_person
)
''' set optimizer '''
self.optimizer = torch.optim.Adam(self.featnet.parameters(), self.lr)
epoch_loss_list = []
for epoch in range(self.epoch):
print('INFO: Epoch={}'.format(epoch+1))
self.train_dataGenerator.reset(self.featnet, self.device)
train_g = self.train_dataGenerator.gen()
epoch_loss = 0.0
for step in range(self.train_dataGenerator.batches):
self.optimizer.zero_grad()
triplet_batch = next(train_g)
img_ps = triplet_batch['ps'].to(self.device)
img_ps_mask = triplet_batch['ps_mask'].to(self.device)
img_as = triplet_batch['as'].to(self.device)
img_as_mask = triplet_batch['as_mask'].to(self.device)
img_ns = triplet_batch['ns'].to(self.device)
img_ns_mask = triplet_batch['ns_mask'].to(self.device)
fp, fa, fn = self.featnet(img_ps), self.featnet(img_as), self.featnet(img_ns)
etl_loss, b_AP, b_AN = self.etl_loss.forward(
fp, fa, fn, img_ps_mask, img_as_mask, img_ns_mask)
epoch_loss += etl_loss.item()
fp.retain_grad()
fa.retain_grad()
fn.retain_grad()
etl_loss.backward()
if (step + 1) % 10 == 0:
print('INFO: Steps={}, ETL loss={}'.format(step+1, etl_loss.item()))
torch.save(
self.featnet,
self.snapshots + 'featnet_epoch{}_steps{}.pth'.format(epoch+1, step+1))
grad_etl2fp, grad_etl2fa, grad_etl2fn = self.get_grad(
etl_loss,
fp, fa, fn,
img_ps_mask, img_as_mask, img_ns_mask,
b_AP, b_AN
)
''' replace gradients '''
fp.grad.data = grad_etl2fp.data
fa.grad.data = grad_etl2fa.data
fn.grad.data = grad_etl2fn.data
self.optimizer.step()
epoch_loss_list.append(epoch_loss)
np.save('static/epoch_loss.npy', epoch_loss_list)
print('INFO: Epoch {} done, total loss={}'.format(epoch+1, epoch_loss))
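# Hedged usage sketch (the dataset paths are the defaults declared above):
#
#     if __name__ == '__main__':
#         UniNet(epoch=1000, batch_size=90).train()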
|
import re
import requests
import argparse
import sys
parser = argparse.ArgumentParser(description='Load VitaDock exports into Runalyze.com')
parser.add_argument('-k', '--key', required=True, help='The personal API for Runalyze.com')
parser.add_argument('-t', '--type', required=True, help='The type of data set to load.', choices=['bloodPressure', 'bodyComposition'])
parser.add_argument('-f', '--file', type=argparse.FileType('r'), default=sys.stdin, help='The input file to read from. This is an export file from VitaDock online. Default is to read from stdin.')
parser.add_argument('-v', '--version', action='version', version='loadVitaDock v1.0')
RUNALYZE_API_ENDPOINT = 'https://runalyze.com/api/v1/'
args = parser.parse_args()
headers = { "token": args.key }
def loadData(content):
r = requests.post(RUNALYZE_API_ENDPOINT + 'metrics/' + args.type, json=content, headers=headers)
print(r.content)
if args.type == 'bloodPressure':
PATTERN = '"(.*)/(.*)/(.*) - (.*)";"(.*) mmHg";"(.*) mmHg";"(.*) bpm";"(.*)";"(.*)";" "'
for reading in args.file:
if match := re.search(PATTERN, reading):
jsoncontent = { "date_time": match.group(3) + "-" + match.group(1) + "-" + match.group(2) + "T" + match.group(4) + ":00Z", "systolic": int(match.group(5)), "diastolic": int(match.group(6)), "heart_rate": int(match.group(7)) }
loadData(jsoncontent)
elif args.type == 'bodyComposition':
PATTERN = '(.*)/(.*)/(.*) - (.*);(.*);(.*);(.*);(.*);(.*);(.*);.*;.*;.*'
for reading in args.file:
if match := re.search(PATTERN, reading):
jsoncontent = { "date_time": match.group(3) + "-" + match.group(1) + "-" + match.group(2) + "T" + match.group(4) + ":00Z", "weight": float(match.group(5)), "fat_percentage": float(match.group(7)), "water_percentage": float(match.group(8)), "muscle_percentage": float(match.group(9)), "bone_percentage": float(match.group(6)) }
loadData(jsoncontent)
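# Hedged example invocation (the script and export file names are placeholders):
#   python loadVitaDock.py --key <RUNALYZE_API_TOKEN> --type bloodPressure --file vitadock_export.csv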
|
from dubbo_client import ApplicationConfig, ZookeeperRegistry, DubboClient, DubboClientError
service_interface = 'com.truthso.monitor.service.CompareService'
registry = ZookeeperRegistry('127.0.0.1:2181')
compare_provider = DubboClient(service_interface, registry, version='1.0.0', group='gaopin')
print(compare_provider.compare({
u'width': 650,
u'height': 433,
u'phash': u'1011100001101000101100001101110101101100101001010101111001001010',
u'sum': 5429,
u'ave': 5.0,
u'dc': 4331,
u'rSum': 144219,
u'gSum': 142677,
u'bSum': 136723,
u'hash': 4064693128758910538,
}))
|
# Approach 1:
def Remove_Row_with_Custom_Element(Test_list, Check_list):
    # Iterate over a copy: removing rows from the list being iterated would
    # otherwise skip the element that follows each removed row.
    for row in list(Test_list):
        for ele in Check_list:
            if ele in row:
                Test_list.remove(row)
                break  # the row is already removed; move on to the next row
    return Test_list
Test_list = [[5, 3, 1], [7, 8, 9], [1, 10, 22], [12, 18, 21]]
Check_list = [3, 10, 19, 29, 20, 15]
print(Remove_Row_with_Custom_Element(Test_list, Check_list))
# Approach 2:
def Remove_Row_with_Custom_Element(Test_list, Check_list):
return [row for row in Test_list if not any(ele in row for ele in Check_list)]
Test_list = [[5, 3, 1], [7, 8, 9], [1, 10, 22], [12, 18, 21]]
Check_list = [3, 10, 19, 29, 20, 15]
print(Remove_Row_with_Custom_Element(Test_list, Check_list))
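# Both approaches drop the rows containing 3 and 10, so each call above prints:
# [[7, 8, 9], [12, 18, 21]]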
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
from japandas.io.estat import EStatReader
from pandas_datareader import data
_ohlc_columns_jp = ['始値', '高値', '安値', '終値', '出来高', '調整後終値*']
_ohlc_columns_en = ['Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
def DataReader(symbols, data_source=None, start=None, end=None, appid=None, **kwargs):
if data_source == 'yahoojp':
msg = "YahooJPReaderは削除されました https://www.yahoo-help.jp/app/answers/detail/p/546/a_id/93575"
raise NotImplementedError(msg)
elif data_source == 'estat':
return EStatReader(symbols=symbols, appid=appid, **kwargs).read()
else:
return data.DataReader(name=symbols, data_source=data_source,
start=start, end=end, **kwargs)
DataReader.__doc__ = data.DataReader.__doc__
|
#!/usr/bin/env python3
import sys
import datetime
import pytz
from traffic.core.api import getOptimalRouteTime
def main():
origin_address = "Savoy+Ct,+London+WC2R+0EZ"
destination_address = "Francis+St,+Westminster,+London+SW1P+1QW"
traffic_model = "pessimistic"
startTime = datetime.datetime(2020, 2, 3, 9, 0, tzinfo=pytz.timezone('Europe/London'))
try:
        # getOptimalRouteTime is assumed to return seconds; divide once to report minutes
        time = getOptimalRouteTime(startTime, origin_address, destination_address, model=traffic_model) / 60.
        print("Quickest route time is: {} minutes.".format(time))
except OSError as err:
print("OS error: {0}".format(err))
except ValueError as err:
print(err)
except:
print("Unexpected error:", sys.exc_info()[0])
if __name__ == "__main__":
main()
|
# Class for a server
class fprint:
@staticmethod
def cmd(string, prefix="[$] ", suffix="", end="\n"):
print(prefix + string + suffix, end=end)
class Identifier:
def __init__(self):
self.re = __import__("re")
def is_ip(self, obj):
if self.is_string(obj):
real_ip = r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
return self.re.match(real_ip, obj)
return False
def is_port(self, obj):
if self.is_integer(obj):
            return 0 <= obj <= 65535  # valid TCP/UDP ports are 16-bit values
return False
@staticmethod
def is_string(obj):
return isinstance(obj, str)
@staticmethod
def is_integer(obj):
return isinstance(obj, int)
"""
Main Server module
Server(server_ip="", server_port="") - Sets up the server variable
Server.start() - Starts the Server.listener() and Server.responder() threads
Other Server modules
Server.get_server_ip() - Returns server IP
Server.set_server_ip(new_server_ip) - Sets a new server IP
Server.get_server_port() - Returns server Port
Server.set_server_port(new_server_port) - Sets a new server Port
Private Server modules
Server.listener() -
Server.responder() -
"""
class Server:
def __init__(self, server_ip=None, server_port=None):
# Modules
self.identify = Identifier()
self.thread = __import__("_thread")
# Server variables
self.server_ip = None
self.set_server_ip(server_ip)
self.server_port = None
self.set_server_port(server_port)
def get_server_ip(self):
return self.server_ip
def set_server_ip(self, new_server_ip):
if self.identify.is_ip(new_server_ip):
self.server_ip = new_server_ip
def set_server_port(self, new_server_port):
if self.identify.is_port(new_server_port):
self.server_port = new_server_port
def start(self):
self.thread.start_new_thread(self.listener, ())
self.thread.start_new_thread(self.responder, ())
def listener(self):
is_running = True
while is_running:
pass
    def listener_user(self, socket, ip, port):
        # Placeholder for handling a single connected user; not implemented yet
        pass
def responder(self):
is_running = True
while is_running:
pass
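# --- Hedged usage sketch (not part of the original module) ---
# Follows the module docstring above: construct a Server, inspect it with the getters,
# then start() spawns the (currently stubbed) listener/responder threads.
# The IP and port below are arbitrary examples.
if __name__ == "__main__":
    server = Server(server_ip="127.0.0.1", server_port=8080)
    fprint.cmd("Configured {}:{}".format(server.get_server_ip(), server.get_server_port()))
    server.start()  # listener() and responder() currently just loop, so this returns immediately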
|
from dataclasses import dataclass
from typing import Dict, List, Iterator, Tuple
import pandas as pd
from covid_model_seiir_pipeline.lib import (
utilities,
)
@dataclass(repr=False, eq=False)
class ODEParameters:
# Core parameters
alpha: pd.Series
sigma: pd.Series
gamma1: pd.Series
gamma2: pd.Series
# Variant prevalences
rho: pd.Series
rho_variant: pd.Series
rho_b1617: pd.Series
rho_total: pd.Series
# Escape variant initialization
pi: pd.Series
# Cross-variant immunity
chi: pd.Series
# Vaccine parameters
vaccinations_unprotected_lr: pd.Series
vaccinations_non_escape_protected_lr: pd.Series
vaccinations_escape_protected_lr: pd.Series
vaccinations_non_escape_immune_lr: pd.Series
vaccinations_escape_immune_lr: pd.Series
vaccinations_unprotected_hr: pd.Series
vaccinations_non_escape_protected_hr: pd.Series
vaccinations_escape_protected_hr: pd.Series
vaccinations_non_escape_immune_hr: pd.Series
vaccinations_escape_immune_hr: pd.Series
def to_dict(self) -> Dict[str, pd.Series]:
return {k: v.rename(k) for k, v in utilities.asdict(self).items()}
def to_df(self) -> pd.DataFrame:
return pd.concat(self.to_dict().values(), axis=1)
def reindex(self, index: pd.Index) -> 'ODEParameters':
# noinspection PyArgumentList
return type(self)(
**{key: value.reindex(index) for key, value in self.to_dict().items()}
)
def get_vaccinations(self, vaccine_types: List[str], risk_group: str) -> pd.DataFrame:
vaccine_type_map = {
'u': 'vaccinations_unprotected',
'p': 'vaccinations_non_escape_protected',
'pa': 'vaccinations_escape_protected',
'm': 'vaccinations_non_escape_immune',
'ma': 'vaccinations_escape_immune',
}
vaccinations = []
for vaccine_type in vaccine_types:
attr = f'{vaccine_type_map[vaccine_type]}_{risk_group}'
vaccinations.append(getattr(self, attr).rename(attr))
return pd.concat(vaccinations, axis=1)
def __iter__(self) -> Iterator[Tuple[int, 'ODEParameters']]:
location_ids = self.alpha.reset_index().location_id.unique()
this_dict = self.to_dict().items()
for location_id in location_ids:
# noinspection PyArgumentList
loc_parameters = type(self)(
**{key: value.loc[location_id] for key, value in this_dict},
)
yield location_id, loc_parameters
@dataclass(repr=False, eq=False)
class FitParameters(ODEParameters):
# Transmission intensity
new_e: pd.Series = None
kappa: pd.Series = None
phi: pd.Series = None
psi: pd.Series = None
# Sub-populations
population_low_risk: pd.Series = None
population_high_risk: pd.Series = None
@dataclass(repr=False, eq=False)
class ForecastParameters(ODEParameters):
# Transmission intensity
beta: pd.Series = None
beta_wild: pd.Series = None
beta_variant: pd.Series = None
beta_hat: pd.Series = None
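# --- Hedged usage sketch (not part of the original module) ---
# Builds a dummy ODEParameters and exercises to_df()/iteration, assuming every field
# shares a (location_id, date) MultiIndex and that utilities.asdict behaves like
# dataclasses.asdict. All values are placeholders.
if __name__ == '__main__':
    import dataclasses
    index = pd.MultiIndex.from_product(
        [[101, 102], pd.date_range('2021-01-01', periods=3)],
        names=['location_id', 'date'],
    )
    dummy_fields = {f.name: pd.Series(1.0, index=index) for f in dataclasses.fields(ODEParameters)}
    params = ODEParameters(**dummy_fields)
    print(params.to_df().shape)              # one column per parameter
    for location_id, loc_params in params:   # per-location parameter subsets
        print(location_id, loc_params.to_df().shape)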
|
# Generated by Django 2.1.3 on 2018-11-15 09:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='wine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
('slug', models.SlugField(max_length=40)),
('grower', models.CharField(max_length=200)),
('year', models.IntegerField(blank=True, default=0, null=True)),
('color', models.CharField(choices=[('red', 'Rot'), ('white', 'Weiß'), ('rose', 'Rose')], max_length=5)),
('front_img', models.ImageField(blank=True, null=True, upload_to='')),
('back_img', models.ImageField(blank=True, null=True, upload_to='')),
('rating', models.IntegerField(blank=True, null=True)),
('notes', models.CharField(blank=True, max_length=500, null=True)),
('country', models.CharField(blank=True, max_length=50, null=True)),
('provine', models.CharField(blank=True, max_length=50, null=True)),
],
),
]
|
from math import sin, cos, tan, radians
angulo = float(input('Enter the angle you want: '))
seno = sin(radians(angulo))
coseno = cos(radians(angulo))
tangente = tan(radians(angulo))
print('The angle {} has a SINE of {:.2f}'.format(angulo, seno))
print('The angle {} has a COSINE of {:.2f}'.format(angulo, coseno))
print('The angle {} has a TANGENT of {:.2f}'.format(angulo, tangente))
|
# -*- coding: utf-8 -*-
"""
Python implementation of decentralized update algorithm with multi threads
author: Yue Yang (@The-chosen)
"""
import sys
sys.path.insert(0, '../')
import argparse
import yaml
import csv  # needed when IS_TEST writes timing results
from math import fabs
from itertools import combinations
from copy import deepcopy
import time
import eventlet
import threading
from queue import PriorityQueue
from algorithm.a_star_mine import AStar
from algorithm.cbs_mine import *
# from a_star_mine import AStar
# from cbs_mine import *
# eventlet.monkey_patch()
# Global variables
pq = PriorityQueue()
solution = None
alive_agent_thread_num = None
DYNAMIC_OBSTACLES = None
INITIAL_RUNTIME = 3
DEFAULT_TIME_LIMITATION = 5
INF_NUM = 999999999999999
R = 4
THRESHOLD = 25
TIME_LIMIT = 20
TIMESTEP_TIME = 0.5
IS_TEST = False  # Set to True only when testing CBS for the initialization step.
class Server(threading.Thread):
    """
    Custom thread class: it must inherit threading.Thread and override run(),
    otherwise starting it fails. The work each spawned server thread performs
    is defined inside run().
    """
def __init__(self, dimension, obstacles, agents, output_file, num_combine=2):
threading.Thread.__init__(self)
self.dimension = dimension
self.obstacles = obstacles
self.agents = agents
self.output_file = output_file
self.num_combine = num_combine # Number of conflict list to be combined
def run(self):
global alive_agent_thread_num, solution, pq, TIME_LIMIT
# Robots start
while True:
# agents all finish their paths
# print("alive_agent_thread_num: ", alive_agent_thread_num)
if alive_agent_thread_num == 0:
print("[INFO] FINAL FOUND SOLUTION: " + str(solution))
output = {}
output["schedule"] = solution
# output["cost"] = env.compute_solution_cost(solution)
with open(self.output_file, 'w') as output_yaml:
yaml.safe_dump(output, output_yaml)
return
if pq.empty():
continue
else:
compute_start_time = time.time()
# Combine several conflict list & get minimum anytime limitation
conflicts = []
min_anytime_limitation_timestep = INF_NUM
min_anytime_limitation = INF_NUM
for i in range(self.num_combine):
if pq.empty():
break
_, conflict, anytime_limitation_timestep, anytime_limitation = pq.get()
conflicts += conflict
if anytime_limitation_timestep < min_anytime_limitation_timestep:
min_anytime_limitation_timestep = anytime_limitation_timestep
if anytime_limitation < min_anytime_limitation:
min_anytime_limitation = anytime_limitation
# change agents start time
agents_cp = deepcopy(self.agents)
for agent in agents_cp:
length = len(solution[agent['name']])
if min_anytime_limitation_timestep >= length:
temp = solution[agent['name']][length - 1]
else:
temp = solution[agent['name']][min_anytime_limitation_timestep]
agent['start'] = [temp['x'], temp['y']]
# No dangerous points
if len(conflicts) == 0:
continue
print("===========================================")
env = Environment(self.dimension, agents_cp, self.obstacles, obstacles_d=conflicts)
# Searching
cbs = CBS(env, min_anytime_limitation)
print('[INFO] Start common searching ...')
print("[INFO] Anytime limitation: " + str(min_anytime_limitation))
# with eventlet.Timeout(TIME_LIMIT, False):
solution_crr = cbs.search()
if not solution_crr:
print('[ERROR] Solution not found!')
continue
print('[INFO] Common searching ends')
# Get previous solution
solution_pre = solution
# util: map function
def f(x):
x['t'] += min_anytime_limitation_timestep
return x
# combine previous solution [:timestep + anytime_limitation] and new solution
for agent in solution_pre.keys():
# print("solution_crr: " + str(solution_crr))
# print("solution_pre: " + str(solution_pre))
solution_crr[agent] = solution_pre[agent][:min_anytime_limitation_timestep] + (list(map(f, solution_crr[agent])))
solution = solution_crr
print('[INFO] SOLUTION:')
print(solution)
compute_end_time = time.time()
print('[INFO] Common searching use time: ' + str(compute_end_time - compute_start_time))
class Agent(threading.Thread):
'''
@Params:
agent_name: name of the agent.
R: Range the agent can see. It's a square(2R + 1 * 2R + 1).
paths: Paths of all the agents.
threshold: If score of one point is larger than this parameter, the point will be added to constraint list
timestep_time: time limitation for each timestep
'''
def __init__(self, agent_name, timestep_time=TIMESTEP_TIME, R=R, threshold=THRESHOLD):
global solution
threading.Thread.__init__(self)
self.R = R
self.agent_name = agent_name
self.paths = deepcopy(solution)
self.threshold = threshold
self.timesteps = len(self.paths[self.agent_name])
self.timestep_time = timestep_time
self.detect_pt = []
self.detect_obstacle = []
self.utils = Utils()
def run(self):
global alive_agent_thread_num, solution
for timestep in range(self.timesteps):
self.paths = deepcopy(solution)
self.do_things_in_one_timestep(timestep)
alive_agent_thread_num -= 1
print('[INFO] ' + str(self.agent_name) + ' end its path!')
    # Detects dangerous points for this timestep and, if any are found, pushes
    # (min_cost_pt, conflicts, anytime_limitation + timestep, anytime seconds) onto the global queue
def do_things_in_one_timestep(self, timestep):
global pq
start_time = time.time()
# ↓↓↓↓↓ Do things from here ↓↓↓↓↓
self.detect_pt = []
points = self.detect(timestep, DYNAMIC_OBSTACLES)
if not points:
end_time = time.time()
use_time = end_time - start_time
            time.sleep(max(0, self.timestep_time - use_time))  # ensure the thing will be done within timestep_time
real_end = time.time()
# print(self.agent_name + ' finish timestep ' + str(timestep) + \
# '. Use time: ' + str(real_end - start_time) + '. Have collision points.')
return
conflict_list_agent, anytime_limitation, min_cost_pt = self.find_constraint_list()
if anytime_limitation == 'DEFAULT':
anytime_limitation = DEFAULT_TIME_LIMITATION
pq.put((min_cost_pt, conflict_list_agent, anytime_limitation + timestep, anytime_limitation * self.timestep_time))
# ↑↑↑↑↑ End things from above ↑↑↑↑↑
end_time = time.time()
use_time = end_time - start_time
        time.sleep(max(0, self.timestep_time - use_time))  # ensure the thing will be done within timestep_time
real_end = time.time()
# print(self.agent_name + ' finish timestep ' + str(timestep) + \
# '. Use time: ' + str(real_end - start_time) + '. No collision points.')
'''
@Params:
t: current timestep
@Return:
scored list of points
    @Explanation: In reality, this should be a function that accepts sensor data.
                  It needs to judge whether an occupied cell is an obstacle or not.
'''
def detect(self, t, dynamic_obstacles):
self.crr_t = t
# Form the detection square
# print(self.paths.keys())
if len(self.paths[self.agent_name]) <= t: # Have reached end point
return False
central_pos = self.paths[self.agent_name][t]
x_right = central_pos['x'] + R
x_left = central_pos['x'] - R
y_up = central_pos['y'] + R
y_bottom = central_pos['y'] - R
# Find points of path that are in the detection square
for agent_name, path in self.paths.items():
if t >= len(path): # No action after t for this agent, then it will be static and detect obstacles
crr_pos = path[len(path) - 1]
else:
crr_pos = path[t]
for i in range(t + 1, len(path)):
pos = path[i]
if (pos['x'] <= x_right) and (pos['x'] >= x_left) and (pos['y'] <= y_up) and (pos['y'] >= y_bottom):
pos['distance'] = i - t
pos['agent_name'] = agent_name
pos['crr_pos'] = crr_pos
self.detect_pt.append(pos)
# Find obstacles that are dangerous and calculate the score of every point
is_dangerous_obs = False
for obstacle_name, path in dynamic_obstacles.items():
is_obstacle_static = False
if t >= len(path): # No action after t for this obstacle
pos = path[len(path) - 1]
is_obstacle_static = True
self.dyn_pos = pos # V2: you cannot run into the around positions of the dynamic obstacle
else:
pos = path[t] # pos: position of dynamic obstacle
self.dyn_pos = pos # V2: you cannot run into the around positions of the dynamic obstacle
if (pos['x'] <= x_right) and (pos['x'] >= x_left) and (pos['y'] <= y_up) and (pos['y'] >= y_bottom):
for pt in self.detect_pt: # pt: point on the path
# For stop dynamic obstacles, only when it's on path will be considered.
if is_obstacle_static:
if pos['x'] == pt['x'] and pos['y'] == pt['y']:
is_dangerous_obs = True
if 'score' not in pt.keys():
pt['score'] = 0
pt['score'] += self.utils.score_func(pt, pos, pt['crr_pos'])
continue
# Else, use score function to calculate how dangerous it is
if abs(pos['x'] - pt['x']) + abs(pos['y'] - pt['y']) <= pt['distance']: # bool to decide whether dangerous
# print(pos)
# print(pt)
is_dangerous_obs = True
if 'score' not in pt.keys():
pt['score'] = 0
pt['score'] += self.utils.score_func(pt, pos, pt['crr_pos'])
# # Position of dynamic obstacles
# pt['pos_x'] = pos['x']
# pt['pos_y'] = pos['y']
if len(self.detect_pt) == 0 or not is_dangerous_obs:
# print('No point or dangerous obstacles detected!')
return False
return self.detect_pt
    '''
    @Params:
        None
    @Return:
        (conflict_list, anytime_limitation, min_cost_pt):
        conflict_list - (x, y) points whose danger score exceeds the threshold, plus
                        the cells around the dynamic obstacle,
        anytime_limitation - replanning time budget ('DEFAULT' means use DEFAULT_TIME_LIMITATION),
        min_cost_pt - smallest timestep distance from the agent to a dangerous point.
    '''
def find_constraint_list(self):
constraint_dict = {}
conflict_list = []
min_cost_pt = INF_NUM
is_path_change = False # whether there's point score > threshold. If yes -> True, else -> False
for pt in self.detect_pt:
if 'score' not in pt.keys():
continue
if pt['score'] >= self.threshold:
is_path_change = True
if pt['t'] - self.crr_t < min_cost_pt: # for calculating anytime
min_cost_pt = pt['t'] - self.crr_t
conflict_list.append((pt['x'], pt['y']))
# v_constraint = VertexConstraint(pt['t'], Location(pt['x'], pt['y']))
# if pt['agent_name'] in constraint_dict.keys():
# constraint_dict[pt['agent_name']].vertex_constraints |= {v_constraint}
# else:
# constraint = Constraints()
# constraint.vertex_constraints |= {v_constraint}
# constraint_dict[pt['agent_name']] = constraint
if is_path_change:
anytime_limitation = self.utils.anytime_func(min_cost_pt)
else:
anytime_limitation = 'DEFAULT'
# V2: Add round space of dynamic obstacles
conflict_list.append((self.dyn_pos['x'], self.dyn_pos['y']))
conflict_list.append((self.dyn_pos['x'] + 1, self.dyn_pos['y']))
conflict_list.append((self.dyn_pos['x'] - 1, self.dyn_pos['y']))
conflict_list.append((self.dyn_pos['x'], self.dyn_pos['y'] + 1))
conflict_list.append((self.dyn_pos['x'], self.dyn_pos['y'] - 1))
return conflict_list, anytime_limitation, min_cost_pt
class Utils(object):
def __init__(self):
pass
'''
@Params:
pos_pt: Position of the point.
pos_obs: Position of the obstacle.
pos_agent: Position of the agent.
@Return:
Score of the point.
'''
def score_func(self, pos_pt, pos_obs, pos_agent):
dist = abs(pos_pt['x'] - pos_obs['x']) + abs(pos_pt['y'] - pos_obs['y'])
cost = abs(pos_pt['x'] - pos_agent['x']) + abs(pos_pt['y'] - pos_agent['y'])
score = 0
if dist <= 1:
score = INF_NUM
elif dist <= 3:
score = THRESHOLD * 1
elif dist == 4:
score = THRESHOLD * 0.75
elif dist == 5:
score = THRESHOLD * 0.5
elif dist >= 6:
score = THRESHOLD * 0.1
if cost <= 3:
score *= 1
elif cost == 4:
score *= 0.8
elif cost == 5:
score *= 0.5
elif cost == 6:
score *= 0.2
elif cost >= 7:
score *= 0.1
return int(score)
    '''
    @Params:
        cost: timestep distance from the agent to the nearest dangerous point.
    @Return:
        Anytime limitation (in timesteps) allowed for replanning.
    '''
def anytime_func(self, cost):
# return cost // 2
return cost - 1
def main():
global solution, alive_agent_thread_num, DYNAMIC_OBSTACLES
# Parser part
parser = argparse.ArgumentParser()
parser.add_argument("param", help="input file containing map and obstacles")
parser.add_argument("output", help="output file with the schedule")
parser.add_argument("dynamic_obs", help="dynamic obs")
if IS_TEST:
parser.add_argument("N", help="number for experiments")
parser.add_argument("agent_num", help="number of agents")
parser.add_argument("obstacle_prob", help="probability of static obstacles")
args = parser.parse_args()
N, agent_num, obstacle_prob = None, None, None
if IS_TEST:
N = args.N if IS_TEST else 0
agent_num = args.agent_num if IS_TEST else 0
obstacle_prob = args.obstacle_prob if IS_TEST else 0
# Read from input file
print('Read from input ...')
with open(args.param, 'r') as param_file:
try:
param = yaml.load(param_file, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
print(exc)
dimension = param["map"]["dimensions"]
obstacles = param["map"]["obstacles"]
agents = param['agents']
# Initial searching
env = Environment(dimension, agents, obstacles)
cbs = CBS(env, INITIAL_RUNTIME)
print('[INFO] Start initial searching ...')
solution = cbs.search(N, agent_num, obstacle_prob)
print('[INFO] Initial searching end')
if not solution:
if IS_TEST:
with open('consume_time_stats/results_' + str(agent_num) + 'agents_' + \
str(obstacle_prob) + '%.csv', 'a+', newline='') as csvfile:
writer = csv.writer(csvfile, delimiter=' ', quotechar='|', quoting=csv.QUOTE_MINIMAL)
# writer.writerow(["consume_time", "cost"])
writer.writerow(["Not found", "Not found"])
print("[ERROR] Initial solution not found" )
return
if IS_TEST:
print("[INFO] INITIAL FOUND SOLUTION: " + str(solution))
output = {}
output["schedule"] = solution
# output["cost"] = env.compute_solution_cost(solution)
with open(args.output, 'w') as output_yaml:
yaml.safe_dump(output, output_yaml)
return
# Assign value to global variables(alive_agent_thread_num & DYNAMIC_OBSTACLES)
alive_agent_thread_num = len(agents)
with open(args.dynamic_obs, 'r') as d_obstacles_file:
try:
DYNAMIC_OBSTACLES = yaml.load(d_obstacles_file, Loader=yaml.FullLoader)['schedule']
except yaml.YAMLError as exc:
print(exc)
# Create threads including server and agents
threads = []
server_thread = Server(dimension, obstacles, agents, args.output) # create server thread
threads.append(server_thread)
for agent in agents:
agent_name = agent['name']
agent_thread = Agent(agent_name) # create agent thread
threads.append(agent_thread)
# Start threads
for thr in threads:
thr.start()
for thr in threads:
if thr.is_alive():
thr.join()
if __name__=='__main__':
main()
|
#!/usr/bin/env python3
import csv
# Convert the national ELR flat file into a schema YAML
def main():
with open('PDI_fields.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count > 0:
print(f'- name: {row[0]}')
print(f' csvField: {row[0]}')
                print()
line_count += 1
if __name__ == "__main__":
main()
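# --- Hedged example of the expected transformation (comments only) ---
# Assuming PDI_fields.csv has a header row followed by rows whose first column is a
# field name (e.g. "patient_id"), the script prints one YAML block per data row:
#   - name: patient_id
#     csvField: patient_id
# followed by a blank line.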
|
#!/usr/bin/env python
# coding: utf-8
#
# Code (c) 2009 Liam Cooke
# See LICENSE.txt
#
# Content (c) 2009 David Malki !
# See http://wondermark.com/554/
vocab = dict(
location_adj = u"""
neo-noir
alternate-history
ancient
post-apocalyptic
dystopian
VR-simulated
metaphorical
anachronistic
leather-clad
coal-powered
dragon-filled
shrill
""",
location_noun = u"""
America
Japan
Soviet Russia
Victorian Britain
medieval Europe
Aztec empire
Atlantis
terraformed Mars
Antarctica
one-way spaceflight
Outer Rim world
set from <i>Road Warrior</i>
""",
protagonist = u"""
flying message courier
student of metaphysics
milquetoast office drone
schlub with mild <abbr>OCD</abbr>
farm boy with dreams
techno-obsessed geek
brooding loner
wisecracking mercenary
idealistic revolutionary
journeyman inventor
collector of oddities
author self-insert
""",
discovery = u"""
magic diadem
arcane prophecy
dusty tome
crazy old man
alien artifact
enchanted sword
otherworldly portal
dream-inducing drug
encrypted data feed
time-traveling soldier
exiled angel
talking fish
""",
adversary = u"""
a megalomaniacal dictator
a government conspiracy
a profit-obsessed corporation
a sneering wizard
supernatural monsters
computer viruses made real
murderous robots
an army led by a sadist
forces that encourage conformity
a charismatic politician on the rise
humanity’s selfish nature
his own insecurity <i>vis-à-vis</i> girls
""",
assistant = u"""
sarcastic female techno-geek
tomboyish female mechanic
shape-shifting female assassin
leather-clad female in shades
girl who's always loved him
bookish female scholar with mousy brown hair
cherubic girl with pigtails and spunk
female who inexplicably becomes attracted to the damaged protagonist for unstated reasons
""",
inventory = u"""
wacky pet
welding gear
closet full of assault rifles
reference book
cleavage
facility with magic
condescending tone
discomfort in formal wear
""",
conflict = u"""
a fistfight atop a tower
a daring rescue preceding a giant explosion
a heroic sacrifice that no one will ever remember
a philosophical argument punctuated by violence
a false victory with the promise of future danger
the invocation of a spell at the last possible moment
eternal love professed without irony
the land restored to health
authorial preaching through the mouths of the characters
convoluted nonsense that squanders the readers’ goodwill
wish-fulfillment solutions to real-world problems
a cliffhanger for the sake of prompting a series
""",
title_adj = u"""
Chrono
Neuro
Aero
Cosmo
Reve
Necro
Cyber
Astro
Psycho
Steam
Meta
Black
""",
title_noun = u"""
punks
mechs
noiacs
opolis
nauts
phages
droids
bots
blades
trons
mancers
Wars
"""
)
for part, words in vocab.items():
vocab[part] = tuple(word.strip() for word in words.strip().split('\n'))
if __name__ == '__main__':
import json
    print(json.dumps(vocab))
|
import shlex
from azul import config
from azul.template import emit
emit({
"resource": [
{
"google_service_account": {
"indexer": {
                    # The count is driven by config.subscribe_to_dss; flipping it to 0
                    # destroys the service account so that its destroy-time provisioner runs.
                    # See https://www.terraform.io/docs/provisioners/index.html#destroy-time-provisioners
"count": 1 if config.subscribe_to_dss else 0,
"project": "${local.google_project}",
"account_id": config.qualified_resource_name('indexer'),
"display_name": f"Azul indexer in {config.deployment_stage}",
"provisioner": [
{
"local-exec": {
"command": ' '.join(map(shlex.quote, [
"python",
config.project_root + "/scripts/provision_credentials.py",
"google-key",
"--build",
"${google_service_account.indexer[0].email}",
]))
}
}, {
"local-exec": {
"when": "destroy",
"command": ' '.join(map(shlex.quote, [
"python",
config.project_root + "/scripts/provision_credentials.py",
"google-key",
"--destroy",
"${google_service_account.indexer[0].email}",
]))
}
}
]
}
},
"null_resource":{
"hmac-secret": {
"provisioner": [
{
"local-exec": {
"command": ' '.join(map(shlex.quote, [
"python",
config.project_root + "/scripts/provision_credentials.py",
"hmac-key",
"--build",
]))
}
}, {
"local-exec": {
"when": "destroy",
"command": ' '.join(map(shlex.quote, [
"python",
config.project_root + "/scripts/provision_credentials.py",
"hmac-key",
"--destroy",
]))
}
}
]
}
}
},
]
})
|
import sublime
import sublime_plugin
from .generic_shell import GenericShell
from .QuickMenu.QuickMenu import QuickMenu
from .macro import Macro
from .progress import ThreadProgress
from .settings import Settings
from .unit_collections import UnitCollections
TERMINALITY_VERSION = "0.3.10"
def plugin_loaded():
Settings.reset()
Settings.startup()
print("[Terminality] v%s" % (TERMINALITY_VERSION))
class TerminalityRunCommand(sublime_plugin.WindowCommand):
def parse_list(self, in_list, macros):
out_list = []
for value in in_list:
if isinstance(value, str):
value = Macro.parse_macro(
string=value,
custom_macros=macros
)
elif (isinstance(value, list) or
isinstance(value, tuple)):
value = self.parse_list(value, macros)
elif isinstance(value, dict):
value = self.parse_dict(value, macros)
out_list.append(value)
return out_list
def parse_dict(self, in_dict, macros):
for key in in_dict:
if isinstance(in_dict[key], str):
in_dict[key] = Macro.parse_macro(
string=in_dict[key],
custom_macros=macros
)
elif (isinstance(in_dict[key], list) or
isinstance(in_dict[key], tuple)):
in_dict[key] = self.parse_list(in_dict[key], macros)
elif isinstance(in_dict[key], dict):
in_dict[key] = self.parse_dict(in_dict[key], macros)
return in_dict
def run(self, selector=None, action=None, arguments_title=None):
if arguments_title is None or arguments_title == "":
self.run_command(selector, action)
return
self.window.show_input_panel(
caption=arguments_title + ":",
initial_text="",
on_done=lambda args: self.run_command(
selector=selector,
action=action,
arguments=args
),
on_change=None,
on_cancel=None
)
def run_command(self, selector=None, action=None, arguments=None):
execution_unit = None
execution_units = UnitCollections.load_default_collections()
# Global
additional_execution_units = Settings.get_global(
"execution_units",
default=Settings.get_global(
"additional_execution_units",
default={}
)
)
for sel in [x for x in ["*", selector] if x is not None]:
if (sel in additional_execution_units and
action in additional_execution_units[sel]):
execution_unit = additional_execution_units[sel][action]
if not isinstance(execution_unit, dict):
continue
# Local
additional_execution_units = Settings.get_local(
"execution_units",
default=Settings.get_local(
"additional_execution_units",
default={}
)
)
for sel in [x for x in ["*", selector] if x is not None]:
if (sel in additional_execution_units and
action in additional_execution_units[sel]):
execution_unit = additional_execution_units[sel][action]
if not isinstance(execution_unit, dict):
continue
elif (sel in execution_units and
action in execution_units[sel]):
execution_unit = execution_units[sel][action]
if not isinstance(execution_unit, dict):
continue
if execution_unit is None:
sublime.error_message("There is no such execution unit")
return
if not isinstance(execution_unit, dict):
if Settings.get("debug"):
print("Execution unit is ignored [%s][%s]" % (selector, action))
return
command = None
command_type = None
for key in ["command", "window_command", "view_command"]:
if key in execution_unit:
command_type = key
command = execution_unit[key]
break
if not command:
sublime.error_message("No command to run")
return
custom_macros = {}
required_macros = []
if "macros" in execution_unit:
custom_macros = execution_unit["macros"]
if "required" in execution_unit:
required_macros = execution_unit["required"]
if "location" not in execution_unit:
execution_unit["location"] = "$working"
is_not_windows = sublime.platform() != "windows"
command_script = [Macro.parse_macro(
string=cmd,
custom_macros=custom_macros,
required=required_macros,
escaped=is_not_windows,
arguments=arguments
) for cmd in command.split(" ")]
if command_type == "window_command" or command_type == "view_command":
args = {}
if "args" in execution_unit:
args = execution_unit["args"]
args = self.parse_dict(args, custom_macros)
if command_type == "window_command":
self.window.run_command(" ".join(command_script), args)
else:
self.window.active_view().run_command(
" ".join(command_script),
args
)
elif command_type == "command":
working_dir = Macro.parse_macro(
string=execution_unit["location"],
custom_macros=custom_macros,
required=required_macros,
arguments=arguments
)
if working_dir is None:
sublime.error_message(
"Working directory is invalid"
)
return
if Settings.get("debug"):
print("Running \"%s\"" % (" ".join(command_script)))
print("Working dir is \"%s\"" % (working_dir))
self.view = self.window.new_file()
self.view.set_name("Running...")
self.view.set_scratch(True)
if is_not_windows:
command_script = " ".join(command_script)
shell = GenericShell(
cmds=command_script,
view=self.view,
on_complete=lambda e, r, p: self.on_complete(
e, r, p, execution_unit
),
no_echo=("no_echo" in execution_unit and
execution_unit["no_echo"]),
read_only=("read_only" in execution_unit and
execution_unit["read_only"])
)
shell.set_cwd(working_dir)
shell.start()
ThreadProgress(
thread=shell,
message="Running",
success_message="Terminal has been stopped",
set_status=self.set_status,
view=self.view
)
elif Settings.get("debug"):
print("Invalid command type")
def on_complete(self, elapse_time, return_code, params, execution_unit):
if return_code is not None:
self.view.set_name(
"Terminal Ended (Return: {0}) [{1:.2f}s]".format(
return_code, elapse_time
)
)
if ("close_on_exit" in execution_unit and
execution_unit["close_on_exit"]):
self.view.window().focus_view(self.view)
self.view.window().run_command("close")
sublime.set_timeout(lambda: self.set_status(), 3000)
def set_status(self, status=None):
for window in sublime.windows():
for view in window.views():
if status is None:
view.erase_status("Terminality")
else:
view.set_status("Terminality", status)
class TerminalityCommand(sublime_plugin.WindowCommand):
"""
    Command to show a menu that is used to run other commands
"""
qm = None
ready_retry = 0
main_menu = {
"items": [["Terminality", "v" + TERMINALITY_VERSION]],
"actions": [""]
}
def get_execution_units(self, execution_units_map, selector):
execution_units = {}
# Default Execution Units
for selector_name in [x for x in ["*", selector] if x is not None]:
if selector_name in execution_units_map:
for action in execution_units_map[selector_name]:
execution_units[action] = execution_units_map[
selector_name
][action]
for selector_name in [x for x in ["*", selector] if x is not None]:
# Global
additional_execution_units = Settings.get_global(
"execution_units",
default=Settings.get_global(
"additional_execution_units",
default={}
)
)
if selector_name in additional_execution_units:
additional_execution_units = additional_execution_units[
selector_name
]
for key in additional_execution_units:
if (key in execution_units and
isinstance(additional_execution_units[key], dict)):
for sub_key in additional_execution_units[key]:
execution_units[key][
sub_key
] = additional_execution_units[key][sub_key]
else:
execution_units[key] = additional_execution_units[key]
if isinstance(additional_execution_units[key], dict):
execution_units[key]["selector"] = selector
# Local
additional_execution_units = Settings.get_local(
"execution_units",
default=Settings.get_local(
"additional_execution_units",
default={}
)
)
if selector_name in additional_execution_units:
additional_execution_units = additional_execution_units[
selector_name
]
for key in additional_execution_units:
if (key in execution_units and
isinstance(additional_execution_units[key], dict)):
for sub_key in additional_execution_units[key]:
execution_units[key][
sub_key
] = additional_execution_units[key][sub_key]
else:
execution_units[key] = additional_execution_units[key]
if isinstance(additional_execution_units[key], dict):
execution_units[key]["selector"] = selector
if Settings.get("debug") and not execution_units:
            print("No execution units found")
return execution_units
def generate_menu(self, ask_arguments=False):
menu = {
"items": [], "actions": [],
"unsort_items": []
}
execution_units_map = UnitCollections.load_default_collections()
sel_name = None
for selector in execution_units_map:
if (len(self.window.active_view().find_by_selector(
selector)) > 0):
sel_name = selector
break
if not sel_name:
for selector in Settings.get("execution_units", {}):
if (len(self.window.active_view().find_by_selector(
selector)) > 0):
sel_name = selector
break
for selector in Settings.get("additional_execution_units", {}):
if (len(self.window.active_view().find_by_selector(
selector)) > 0):
sel_name = selector
break
if Settings.get("debug") and not sel_name:
print("Selector is not found")
execution_units = self.get_execution_units(
execution_units_map,
sel_name
)
# Generate menu
for action in execution_units:
execution_unit = execution_units[action]
if not isinstance(execution_unit, dict):
continue
if "selector" in execution_unit:
selector_name = execution_unit["selector"]
else:
selector_name = sel_name
custom_macros = {}
required_macros = []
platforms = None
arguments_title = None
if ask_arguments:
arguments_title = "Arguments"
if "arguments" in execution_unit:
arguments_title = execution_unit["arguments"]
if "macros" in execution_unit:
custom_macros = execution_unit["macros"]
if "required" in execution_unit:
required_macros = execution_unit["required"]
if "platforms" in execution_unit:
platforms = execution_unit["platforms"]
action_name = Macro.parse_macro(
string=action,
custom_macros=custom_macros,
required=required_macros,
arguments="<Arguments>" if ask_arguments else None
)
if platforms:
matched = False
current_platforms = [
sublime.platform() + "-" + sublime.arch(),
sublime.platform(),
sublime.arch()
]
for platform in current_platforms:
if platform in platforms:
matched = True
break
if not matched:
continue
if action_name is None:
if Settings.get("debug"):
                    print("Required params are not complete")
continue
if "name" in execution_unit:
action_name = Macro.parse_macro(
string=execution_unit["name"],
custom_macros=custom_macros,
required=required_macros,
arguments="<Arguments>" if ask_arguments else None
)
order = action_name
if "order" in execution_unit:
order = execution_unit["order"]
dest = action_name + " command"
if "description" in execution_unit:
dest = Macro.parse_macro(
string=execution_unit["description"],
custom_macros=custom_macros,
required=required_macros,
arguments="<Arguments>" if ask_arguments else None
)
menu["unsort_items"] += [[
action_name,
dest,
{
"command": "terminality_run",
"args": {
"selector": selector_name,
"action": action,
"arguments_title": arguments_title
}
},
order
]]
menu["unsort_items"] = sorted(menu["unsort_items"], key=lambda x: x[3])
while menu["unsort_items"]:
menu["items"].append(menu["unsort_items"][0][:-2])
menu["actions"].append(menu["unsort_items"][0][2])
menu["unsort_items"] = menu["unsort_items"][1:]
if (Settings.get("run_if_only_one_available") and
len(menu["items"]) == 1):
self.window.run_command(
"terminality_run",
menu["actions"][0]["args"]
)
return None
if len(menu["items"]) <= 0 and Settings.get("show_nothing_if_nothing"):
return None
menu["items"] += self.main_menu["items"]
menu["actions"] += self.main_menu["actions"]
return menu
def run(self, arguments=False, menu=None, action=None, replaceMenu=None):
"""
Show menu to user, if ready
"""
if not Settings.ready():
if self.ready_retry > 2:
sublime.message_dialog(
"Terminality is starting up..." +
"Please wait a few seconds and try again."
)
else:
sublime.status_message(
"Terminality is starting up..." +
"Please wait a few seconds and try again..."
)
self.ready_retry += 1
return
if self.qm is None:
self.qm = QuickMenu()
if replaceMenu is not None:
self.qm.setMenu(replaceMenu["name"], replaceMenu["menu"])
return
menu = self.generate_menu(arguments)
if menu is None:
return
self.qm.setMenu("main", menu)
self.qm.show(window=self.window, menu=menu, action=action)
|
from adjunct.exceptions import (
AdjunctSyntaxError,
AdjunctAttributeError,
)
def is_valid_identifier(name):
    """
    Adjunct identifiers:
    1) must be valid python identifiers
    2) must NOT begin with an underscore
    """
    return name.isidentifier() and not name.startswith('_')
class AdjunctObject:
"""
Wraps objects to make them safe for use with Adjunct
This class is not strictly needed, but it is recommended if there is uncertainty
about the attributes of an object, that is, whether they can be used
to do nasty things
"""
    def __init__(self, obj):
        # Bypass the guarded __setattr__ below; otherwise storing the wrapped
        # object itself would raise AdjunctSyntaxError.
        object.__setattr__(self, '_obj', obj)
    def __getattr__(self, name):
        if self.__is_safe_identifier(name) is False:
            raise AdjunctAttributeError(f"{type(self._obj).__name__} object has no attribute {name}")
        return getattr(self._obj, name)
    def __setattr__(self, name, value):
        if self.__is_safe_identifier(name) is False:
            raise AdjunctSyntaxError("invalid syntax")
        return setattr(self._obj, name, value)
def __is_safe_identifier(self, name):
retval = name.isidentifier()
# Identifiers with double underscores are not accessible in Adjunct
if len(name) > 1:
if name[0:2] == '__':
retval = False
return retval
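# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the behaviour described above: is_valid_identifier rejects leading
# underscores, while AdjunctObject forwards ordinary attributes and blocks
# double-underscore access. The Payload class is a made-up example.
if __name__ == '__main__':
    print(is_valid_identifier('balance'), is_valid_identifier('_hidden'))  # True False
    class Payload:
        value = 42
    wrapped = AdjunctObject(Payload())
    print(wrapped.value)  # 42 - forwarded to the wrapped object
    try:
        setattr(wrapped, '__evil__', 1)  # double-underscore names are rejected
    except AdjunctSyntaxError as exc:
        print('blocked:', exc)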
|
>>> from collections import namedtuple
>>> Account = namedtuple('Account', 'owner balance transaction_count')
>>> default_account = Account('<owner name>', 0.0, 0)
>>> johns_account = default_account._replace(owner='John')
>>> janes_account = default_account._replace(owner='Jane')
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Entrypoints for creating AutoML tasks"""
from typing import Union
from azure.ai.ml._restclient.v2022_02_01_preview.models import (
ClassificationPrimaryMetrics,
ClassificationMultilabelPrimaryMetrics,
ObjectDetectionPrimaryMetrics,
InstanceSegmentationPrimaryMetrics,
)
from azure.ai.ml.entities._inputs_outputs import Input
from azure.ai.ml.entities._job.automl.image.image_classification_job import ImageClassificationJob
from azure.ai.ml.entities._job.automl.image.image_classification_multilabel_job import ImageClassificationMultilabelJob
from azure.ai.ml.entities._job.automl.image.image_object_detection_job import ImageObjectDetectionJob
from azure.ai.ml.entities._job.automl.image.image_instance_segmentation_job import ImageInstanceSegmentationJob
from azure.ai.ml.entities._builders.base_node import pipeline_node_decorator
def _create_image_job(
job_cls,
training_data: Input,
target_column_name: str,
primary_metric: Union[str, ClassificationPrimaryMetrics] = None,
validation_data: Input = None,
validation_data_size: float = None,
**kwargs,
):
"""Helper function to create objects for AutoML Image jobs."""
image_job = job_cls(primary_metric=primary_metric, **kwargs)
image_job.set_data(
training_data=training_data,
target_column_name=target_column_name,
validation_data=validation_data,
validation_data_size=validation_data_size,
)
return image_job
@pipeline_node_decorator
def image_classification(
*,
training_data: Input,
target_column_name: str,
primary_metric: Union[str, ClassificationPrimaryMetrics] = None,
validation_data: Input = None,
validation_data_size: float = None,
**kwargs,
) -> ImageClassificationJob:
"""Creates an object for AutoML Image multi-class Classification job.
:param training_data: Training data.
:type training_data: Input
:param target_column_name: Name of the target column.
:type target_column_name: str
:param primary_metric: Primary optimization metric for the task.
:type primary_metric: Union[str, ClassificationPrimaryMetrics]
:param validation_data: Validation data.
:type validation_data: Input
:param validation_data_size: The fraction of training data to be set aside for validation purpose.
:type validation_data_size: float
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Image classification job
:rtype: ImageClassificationJob
"""
return _create_image_job(
job_cls=ImageClassificationJob,
training_data=training_data,
target_column_name=target_column_name,
primary_metric=primary_metric,
validation_data=validation_data,
validation_data_size=validation_data_size,
**kwargs,
)
@pipeline_node_decorator
def image_classification_multilabel(
*,
training_data: Input,
target_column_name: str,
primary_metric: Union[str, ClassificationMultilabelPrimaryMetrics] = None,
validation_data: Input = None,
validation_data_size: float = None,
**kwargs,
) -> ImageClassificationMultilabelJob:
"""Creates an object for AutoML Image multi-label Classification job.
:param training_data: Training data.
:type training_data: Input
:param target_column_name: Name of the target column.
:type target_column_name: str
:param primary_metric: Primary optimization metric for the task.
:type primary_metric: Union[str, ClassificationMultilabelPrimaryMetrics]
:param validation_data: Validation data.
:type validation_data: Input
:param validation_data_size: The fraction of training data to be set aside for validation purpose.
:type validation_data_size: float
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Image multi-label classification job
:rtype: ImageClassificationMultilabelJob
"""
return _create_image_job(
job_cls=ImageClassificationMultilabelJob,
training_data=training_data,
target_column_name=target_column_name,
primary_metric=primary_metric,
validation_data=validation_data,
validation_data_size=validation_data_size,
**kwargs,
)
@pipeline_node_decorator
def image_object_detection(
*,
training_data: Input,
target_column_name: str,
primary_metric: Union[str, ObjectDetectionPrimaryMetrics] = None,
validation_data: Input = None,
validation_data_size: float = None,
**kwargs,
) -> ImageObjectDetectionJob:
"""Creates an object for AutoML Image Object Detection job.
:param training_data: Training data.
:type training_data: Input
:param target_column_name: Name of the target column.
:type target_column_name: str
:param primary_metric: Primary optimization metric for the task.
:type primary_metric: Union[str, ObjectDetectionPrimaryMetrics]
:param validation_data: Validation data.
:type validation_data: Input
:param validation_data_size: The fraction of training data to be set aside for validation purpose.
:type validation_data_size: float
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Image object detection job
:rtype: ImageObjectDetectionJob
"""
return _create_image_job(
job_cls=ImageObjectDetectionJob,
training_data=training_data,
target_column_name=target_column_name,
primary_metric=primary_metric,
validation_data=validation_data,
validation_data_size=validation_data_size,
**kwargs,
)
@pipeline_node_decorator
def image_instance_segmentation(
*,
training_data: Input,
target_column_name: str,
primary_metric: Union[str, InstanceSegmentationPrimaryMetrics] = None,
validation_data: Input = None,
validation_data_size: float = None,
**kwargs,
) -> ImageInstanceSegmentationJob:
"""Creates an object for AutoML Image Instance Segmentation job.
:param training_data: Training data.
:type training_data: Input
:param target_column_name: Name of the target column.
:type target_column_name: str
:param primary_metric: Primary optimization metric for the task.
:type primary_metric: Union[str, InstanceSegmentationPrimaryMetrics]
:param validation_data: Validation data.
:type validation_data: Input
:param validation_data_size: The fraction of training data to be set aside for validation purpose.
:type validation_data_size: float
:param kwargs: A dictionary of additional configuration parameters.
:type kwargs: dict
:return: Image instance segmentation job
:rtype: ImageInstanceSegmentationJob
"""
return _create_image_job(
job_cls=ImageInstanceSegmentationJob,
training_data=training_data,
target_column_name=target_column_name,
primary_metric=primary_metric,
validation_data=validation_data,
validation_data_size=validation_data_size,
**kwargs,
)
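# --- Hedged usage sketch (not part of the original module) ---
# Shows a typical call to one of the factory functions above. The MLTable paths and
# target column name are placeholders for assets that would exist in a workspace.
if __name__ == "__main__":
    job = image_classification(
        training_data=Input(type="mltable", path="azureml:my-training-mltable:1"),
        validation_data=Input(type="mltable", path="azureml:my-validation-mltable:1"),
        target_column_name="label",
        primary_metric=ClassificationPrimaryMetrics.ACCURACY,
    )
    print(type(job).__name__)  # ImageClassificationJob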
|
from __future__ import with_statement, unicode_literals, division, print_function
WITH_STATEMENT = with_statement.compiler_flag
UNICODE_LITERALS = unicode_literals.compiler_flag
DIVISION = division.compiler_flag
PRINT_FUNCTION = print_function.compiler_flag
COMPLETE_FUTURE = WITH_STATEMENT | UNICODE_LITERALS | DIVISION | PRINT_FUNCTION
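# --- Hedged usage sketch (not part of the original module) ---
# The constants above are CPython compiler flags; they are meant to be OR-ed into the
# flags argument of compile() so dynamically compiled code gets the same __future__
# behaviour as this module.
if __name__ == '__main__':
    code = compile("print('half =', 1 / 2)", "<sketch>", "exec", flags=COMPLETE_FUTURE)
    exec(code)  # prints 0.5 because true division is enabled by DIVISION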
|
from pymoo.algorithms.online_cluster_nsga3 import OnlineClusterNSGA3
from pymoo.factory import get_problem, get_visualization, get_reference_directions
from pymoo.optimize import minimize
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering, KMeans
class ExperimentOnlineClusterNSGA3(object):
def __init__(self,
ref_dirs,
n_neighbors=20,
decomposition='auto',
prob_neighbor_mating=0.9,
cluster=KMeans,
number_of_clusters=2,
interval_of_aggregations=1,
save_data=True,
problem=get_problem("dtlz1"),
number_of_executions=1,
termination=('n_gen', 100),
use_random_aggregation=False,
save_dir='',
verbose=False,
save_history=True,
use_different_seeds=True,
**kwargs):
self.save_data = save_data
self.problem = problem
self.number_of_executions = number_of_executions
self.termination = termination
self.use_random_aggregation = use_random_aggregation
self.save_dir = save_dir
self.verbose = verbose
self.save_history = save_history
if use_different_seeds:
self.algorithms = [OnlineClusterNSGA3(
ref_dirs,
pop_size=100,
n_neighbors=n_neighbors,
decomposition=decomposition,
prob_neighbor_mating=prob_neighbor_mating,
seed=i,
number_of_clusters=number_of_clusters,
number_of_clusters_for_directions=number_of_clusters,
interval_of_aggregations=interval_of_aggregations,
current_execution_number=i,
use_random_aggregation = use_random_aggregation,
save_dir=self.save_dir,
save_data=self.save_data,
cluster=cluster) for i in range(self.number_of_executions)]
else:
self.algorithms = [OnlineClusterNSGA3(
ref_dirs,
pop_size=100,
n_neighbors=n_neighbors,
decomposition=decomposition,
prob_neighbor_mating=prob_neighbor_mating,
seed=1,
number_of_clusters=number_of_clusters,
number_of_clusters_for_directions=number_of_clusters,
interval_of_aggregations=interval_of_aggregations,
current_execution_number=i,
use_random_aggregation = use_random_aggregation,
save_dir=self.save_dir,
save_data=self.save_data,
cluster=cluster) for i in range(self.number_of_executions)]
def run(self):
results = []
self.current_execution = 1
for algorithm in self.algorithms:
print('CURRENT EXECUTION {}'.format(self.current_execution))
results.append(minimize(
self.problem,
algorithm,
termination=self.termination,
verbose=self.verbose,
save_history=self.save_history))
self.current_execution +=1
def show_heat_map(self):
aggregations = []
for i in range(self.number_of_executions):
aggregations.append(pd.read_csv(os.path.join(self.save_dir,'Execution {}'.format(i), 'aggregations.txt'), header=None))
aggregations = pd.concat(aggregations, axis=1)
aggregations.columns = ['exec_{}'.format(i) for i in range(self.number_of_executions)]
aggregation_list = [aggregations['exec_{}'.format(i)].value_counts().keys().values.tolist()
for i in range(self.number_of_executions)]
unique_aggregations = list(set([j for i in aggregation_list for j in i]))
unique_aggregations.sort(key = lambda x: x.split('-')[1], reverse=True)
data_transposed = aggregations.T
number_of_aggregations = len(unique_aggregations)
number_of_generations = len(aggregations.index)
heat_map = pd.DataFrame(data=np.zeros((number_of_aggregations, number_of_generations)))
heat_map.index = unique_aggregations
for i in range(number_of_generations):
for k,v in data_transposed[i].value_counts().items():
heat_map.at[k, i] = v
for i in range(number_of_generations):
if heat_map[i].values.sum() != self.number_of_executions:
print('Error in generation {}'.format(i))
plt.figure(figsize=(18,10))
sns.heatmap(heat_map.values, yticklabels=heat_map.index.values, cmap="Blues")
plt.xlabel('Generation', fontsize=20)
plt.yticks(fontsize=13)
plt.ylabel('Aggregation', fontsize=20)
plt.title('Aggregation Heat Map', fontsize=20)
plt.savefig(os.path.join(self.save_dir, 'heat_map.pdf'))
plt.show()
def show_mean_convergence(self, file_name):
convergence = pd.DataFrame(self.generate_mean_convergence(file_name)).mean().values
self.save_convergence(file_name, convergence)
plt.figure()
plt.xlabel('Generation', fontsize=20)
plt.ylabel(file_name.split('_')[0], fontsize=20)
plt.plot(convergence)
plt.title('Convergence', fontsize=20)
plt.savefig(os.path.join(self.save_dir, file_name.split('.')[0] + '.pdf'))
plt.show()
def generate_mean_convergence(self, file_name):
return [self.read_data_file(os.path.join(self.save_dir, 'Execution {}'.format(execution), file_name))
for execution in range(self.number_of_executions)]
def read_data_file(self, file_path):
with open(file_path, 'r') as file:
lines = [float(line.replace('\n','')) for line in file.readlines()]
return lines
def save_convergence(self, file_name, convergence):
with open(os.path.join(self.save_dir, 'mean_' + file_name), 'w') as file:
file.write('\n'.join([str(i) for i in convergence]))
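# --- Hedged usage sketch (not part of the original module) ---
# Drives the experiment class above end to end; the reference-direction call is the
# standard pymoo 'das-dennis' recipe, and the directory/counts are arbitrary examples.
if __name__ == '__main__':
    ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=12)
    experiment = ExperimentOnlineClusterNSGA3(
        ref_dirs,
        problem=get_problem("dtlz1"),
        number_of_executions=2,
        termination=('n_gen', 50),
        save_dir='results_online_cluster_nsga3',
    )
    experiment.run()
    experiment.show_heat_map()  # reads the aggregations.txt files written by each execution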
|
import numpy as np
import pytest
import blm
from tests.conftest import m_needs_pyplot
@pytest.mark.parametrize("model_param", [dict(m_lo=1.1, m_up=0.9, c_lo=0.1, c_up=0.2)])
@pytest.mark.parametrize("x_init", [None, np.array(0.0)])
def test_constructor(model_param: dict, x_init: np.ndarray):
bl_model = blm.BacklashModel(**model_param, x_init=x_init)
print(bl_model)
assert isinstance(bl_model, blm.BacklashModel)
assert bl_model.m_lo == model_param["m_lo"]
assert bl_model.m_up == model_param["m_up"]
assert bl_model.c_lo == model_param["c_lo"]
assert bl_model.c_up == model_param["c_up"]
if x_init is not None:
assert isinstance(bl_model.z_lo, np.ndarray)
assert isinstance(bl_model.z_up, np.ndarray)
@pytest.mark.parametrize(
"ctor_param",
[
dict(m_lo="invalid", m_up=1.0, c_lo=0.01, c_up=0.01, x_init=0),
dict(m_lo=1.0, m_up="invalid", c_lo=0.01, c_up=0.01, x_init=0),
dict(m_lo=1.0, m_up=1.0, c_lo="invalid", c_up=0.01, x_init=0),
dict(m_lo=1.0, m_up=1.0, c_lo=0.01, c_up="invalid", x_init=0),
dict(m_lo=1.0, m_up=1.0, c_lo=0.01, c_up=0.01, x_init="invalid"),
],
)
def test_nonfloat_ctor_params(ctor_param: dict):
with pytest.raises(ValueError):
blm.BacklashModel(**ctor_param)
@pytest.mark.parametrize(
"ctor_param",
[
dict(m_lo=0, m_up=1.0, c_lo=0.01, c_up=0.01, x_init=0),
dict(m_lo=1.0, m_up=0, c_lo=0.01, c_up=0.01, x_init=0),
dict(m_lo=1.0, m_up=1.0, c_lo=-1, c_up=0.01, x_init=0),
dict(m_lo=1.0, m_up=1.0, c_lo=0.01, c_up=-1, x_init=0),
],
)
def test_invalid_ctor_params(ctor_param: dict):
with pytest.raises(ValueError):
blm.BacklashModel(**ctor_param)
@pytest.mark.parametrize("plot", [pytest.param(True, marks=[m_needs_pyplot, pytest.mark.visual]), False])
@pytest.mark.parametrize("model_param", [dict(m_lo=2.0, m_up=1.9, c_lo=2.5, c_up=2.7)])
@pytest.mark.parametrize("apply_u_bl", [False])
def test_all(model_param: dict, apply_u_bl: bool, plot: bool):
# Setup model
dt = 0.002
t_end = 4.0
t_grid = np.linspace(0, t_end, int(t_end / dt))
amp = 5.0
freq = 1.0
bl_model = blm.BacklashModel(**model_param, x_init=np.array(0.0))
# Generate data
x_hist = []
x_bl_hist = []
for t in t_grid:
x = amp * (1 - t / t_end) * np.sin(2 * np.pi * freq * t)
x_hist.append(x.copy())
x_bl = bl_model(x)
x_bl_hist.append(x_bl.copy())
x_hist = np.expand_dims(np.stack(x_hist), axis=1)
x_bl_hist = np.stack(x_bl_hist)
if plot:
from matplotlib import pyplot as plt
plt.figure(figsize=(16, 8))
plt.plot(x_hist)
plt.plot(x_bl_hist)
plt.grid()
u_grid = np.linspace(-5, 5, 10001)
u_grid_lo = model_param["m_lo"] * (u_grid + model_param["c_lo"])
u_grid_up = model_param["m_up"] * (u_grid - model_param["c_up"])
plt.figure(figsize=(8, 8))
plt.plot(u_grid, u_grid_lo, label="lo")
plt.plot(u_grid, u_grid_up, label="up")
plt.fill_between(u_grid, u_grid_lo, u_grid_up, color="gray", alpha=0.3, label="backlash zone")
plt.gca().axis("equal")
plt.grid()
plt.legend()
plt.show()
# Fit the model parameters
bl_model.fit(u=x_hist[1:], x=x_bl_hist[1:], x_prev=x_bl_hist[:-1])
# Catch invalid shapes
with pytest.raises(ValueError):
bl_model.fit(u=x_hist[2:], x=x_bl_hist[1:], x_prev=x_bl_hist[:-1])
|
# -*- coding: utf-8 -*-
import scrapy
import json
from jobs.items import JobsItem
try:
    from urllib.parse import quote  # Python 3
except ImportError:
    from urllib import quote  # Python 2
class LagouSpider(scrapy.Spider):
name = 'lagou'
allowed_domains = ['www.lagou.com']
start_urls = ['https://www.lagou.com/jobs/']
positionUrl = 'https://www.lagou.com/jobs/positionAjax.json?'
curPage = 1
city = '上海'
keywords = [
'APP设计师',
'.NET',
'ARM开发',
'ASP',
'Android',
'BD经理',
'BI工程师',
'C',
'C#',
'C++',
'CDN',
'CEO',
'CFO',
'CMO',
'COCOS2D-X',
'COO',
'CTO',
'DB2',
'DBA其它',
'DSP开发',
'Delphi',
'ETL',
'F5',
'FPGA开发',
'Flash',
'Flash设计师',
'Go',
'HRBP',
'HRD/HRM',
'HTML5',
'Hadoop',
'Hive',
'IDC',
'IT支持',
'Java',
'JavaScript',
'MongoDB',
'MySQL',
'Node.js',
'Oracle',
'PCB工艺',
'PHP',
'Perl',
'Python',
'Ruby',
'SEM',
'SEO',
'SQLServer',
'Shell',
'U3D',
'UI设计师',
'VB',
'WEB安全',
'WP',
'html5',
'iOS',
'web前端',
'专利',
'主编',
'交互设计师',
'交互设计总监',
'交互设计经理/主管',
'交易员',
'产品助理',
'产品实习生',
'产品总监',
'产品经理',
'产品运营',
'产品部经理',
'人事/HR',
'人力资源',
'仓储',
'代理商销售',
'企业软件其它',
'会计',
'全栈工程师',
'公关总监',
'内容编辑',
'内容运营',
'出纳',
'分析师',
'前台',
'前端开发其它',
'副主编',
'副总裁',
'功能测试',
'助理',
'单片机',
'原画师',
'合规稽查',
'后端开发其它',
'员工关系',
'品牌公关',
'品类运营',
'售前咨询',
'售前工程师',
'售后客服',
'售后工程师',
'商业数据分析',
'商务总监',
'商务渠道',
'商品经理',
'商家运营',
'图像处理',
'图像识别',
'培训经理',
'多媒体设计师',
'大客户代表',
'媒介经理',
'安全专家',
'实施工程师',
'审计',
'客户代表',
'客服总监',
'客服经理',
'射频工程师',
'嵌入式',
'市场总监',
'市场推广',
'市场策划',
'市场营销',
'市场顾问',
'平面设计师',
'并购',
'并购总监',
'广告协调',
'广告设计师',
'律师',
'性能测试',
'总助',
'手机测试',
'技术合伙人',
'技术总监',
'技术经理',
'投资助理',
'投资总监',
'投资经理',
'投资者关系',
'投资顾问',
'招聘',
'搜索算法',
'政府关系',
'数据产品经理',
'数据仓库',
'数据分析师',
'数据挖掘',
'数据运营',
'文案策划',
'文秘',
'新媒体运营',
'无线交互设计师',
'无线产品设计师',
'机器学习',
'机器视觉',
'材料工程师',
'架构师',
'模具设计',
'法务',
'活动策划',
'活动运营',
'测试其它',
'测试工程师',
'测试开发',
'测试总监',
'测试经理',
'海外市场',
'海外运营',
'淘宝客服',
'深度学习',
'清算',
'渠道销售',
'游戏制作人',
'游戏动作',
'游戏场景',
'游戏数值策划',
'游戏测试',
'游戏特效',
'游戏界面设计师',
'游戏策划',
'游戏角色',
'游戏运营',
'灰盒测试',
'热传导',
'物流',
'理财顾问',
'用户研究员',
'用户研究总监',
'用户研究经理/主管',
'用户运营',
'电商产品经理',
'电话销售',
'电路设计',
'病毒分析',
'白盒测试',
'硬件',
'硬件交互设计师',
'硬件开发其它',
'硬件测试',
'移动产品经理',
'移动开发其它',
'税务',
'算法工程师',
'精准推荐',
'精益工程师',
'系统安全',
'系统工程师',
'系统管理员',
'系统集成',
'结算',
'绩效考核经理',
'网店运营',
'网络安全',
'网络工程师',
'网络推广',
'网络营销',
'网页交互设计师',
'网页产品经理',
'网页产品设计师',
'网页设计师',
'美术设计师(2D/3D)',
'自动化',
'自动化测试',
'自然语言处理',
'薪资福利经理',
'融资',
'融资总监',
'行业研究',
'行政',
'行政总监/经理',
'视觉设计师',
'视觉设计总监',
'视觉设计经理/主管',
'记者',
'设计总监',
'设计经理/主管',
'语音识别',
'财务',
'财务总监/经理',
'资产管理',
'资信评估',
'运维其它',
'运维工程师',
'运维开发工程师',
'运维总监',
'运维经理',
'运营专员',
'运营总监',
'运营经理',
'采购专员',
'采购总监',
'采购经理',
'销售专员',
'销售助理',
'销售总监',
'销售经理',
'销售顾问',
'项目助理',
'项目总监',
'项目经理',
'风控',
'风控总监',
'驱动开发',
'高端技术职位其它',
'黑盒测试',
]
kd_cur = 0
keyword = keywords[0]
item_fns = [
'city', 'education', 'workYear', 'salary', 'firstType', 'secondType',
'positionName', 'positionAdvantage', 'positionLables',
'companyFullName', 'companyShortName', 'companySize',
'companyLabelList', 'financeStage', 'industryField', 'industryLables',
'district', 'isSchoolJob', 'jobNature', 'createTime'
]
headers = {
'origin':
"https://www.lagou.com",
'x-anit-forge-code':
"0",
'user-agent':
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36",
'content-type':
"application/x-www-form-urlencoded",
'accept':
"application/json, text/javascript, */*; q=0.01",
'x-requested-with':
"XMLHttpRequest",
'x-anit-forge-token':
"None",
'dnt':
"1",
'accept-encoding':
"gzip, deflate, br",
'accept-language':
"zh-CN,zh;q=0.8,en;q=0.6",
'cookie':
"user_trace_token=20170728162449-adcee15cc85848189cfb891619b80998; LGUID=20170728162451-3a327df6-736e-11e7-b9bc-5254005c3644; _gat=1; PRE_UTM=; PRE_HOST=; PRE_SITE=https%3A%2F%2Fwww.lagou.com%2Fgongsi%2F; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; LGSID=20170807221703-15e8d3a5-7b7b-11e7-839b-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; JSESSIONID=ABAAABAACBHABBI1DE966CF9357E8BD2D82C53257584955; X_HTTP_TOKEN=2274185a2011ec03f73ab98f8ceaf490; TG-TRACK-CODE=index_navigation; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1501230293; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1503803797; _ga=GA1.2.1475031428.1501230295; LGRID=20170827111636-22b244c2-8ad6-11e7-8f3c-5254005c3644; SEARCH_ID=52d0e756ac5d41b395d4a9e57ab74b72; user_trace_token=20170728162449-adcee15cc85848189cfb891619b80998; LGUID=20170728162451-3a327df6-736e-11e7-b9bc-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; user_trace_token=20170728162449-adcee15cc85848189cfb891619b80998; LGUID=20170728162451-3a327df6-736e-11e7-b9bc-5254005c3644; _gat=1; PRE_UTM=; PRE_HOST=; PRE_SITE=https%3A%2F%2Fwww.lagou.com%2Fgongsi%2F; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F; LGSID=20170807221703-15e8d3a5-7b7b-11e7-839b-5254005c3644; index_location_city=%E5%85%A8%E5%9B%BD; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1501230293; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1503587852; _ga=GA1.2.1475031428.1501230295; LGRID=20170824231731-59096c0b-88df-11e7-8ea0-5254005c3644; JSESSIONID=ABAAABAACBHABBI1DE966CF9357E8BD2D82C53257584955; X_HTTP_TOKEN=2274185a2011ec03f73ab98f8ceaf490; TG-TRACK-CODE=index_navigation; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1501230293; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1503807054; _ga=GA1.2.1475031428.1501230295; LGSID=20170827121054-b83e61a1-8add-11e7-8f3c-5254005c3644; PRE_UTM=; PRE_HOST=; PRE_SITE=https%3A%2F%2Fwww.lagou.com%2Fzhaopin%2Fheiheceshi%2F%3FlabelWords%3Dlabel; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fjobs%2Flist_Go%3Fcity%3D%25E5%2585%25A8%25E5%259B%25BD%26cl%3Dfalse%26fromSearch%3Dtrue%26labelWords%3D%26suginput%3D; LGRID=20170827121054-b83e632d-8add-11e7-8f3c-5254005c3644; SEARCH_ID=ced77b843c064940a3c12c51d8720ca1",
}
def start_requests(self):
return [self.next_request()]
def parse(self, response):
jdict = json.loads(response.body)
jcontent = jdict['content']
jposresult = jcontent['positionResult']
jresult = jposresult['result']
resultSize = int(jposresult['resultSize'])
pageSize = int(jcontent['pageSize'])
print('[lagou][%s]resultSize: %d, pageNo: %d, pageSize: %d' %
(self.keyword, resultSize, self.curPage, pageSize))
for entry in jresult:
if len(entry) < 10:
continue
item = JobsItem()
item['keyword'] = self.keyword
item['pid'] = self.keyword + "_" + str(
entry['positionId']) + "_" + str(entry['publisherId'])
for fn in self.item_fns:
item[fn] = entry[fn]
yield item
if pageSize <= resultSize:
self.curPage += 1
yield self.next_request()
elif self.kd_cur < len(self.keywords) - 1:
self.curPage = 1
self.kd_cur += 1
self.keyword = self.keywords[self.kd_cur]
yield self.next_request()
def next_request(self):
self.headers['referer'] = "https://www.lagou.com/jobs/list_" + quote(
self.keyword
) + "?city=%E5%85%A8%E5%9B%BD&cl=false&fromSearch=true&labelWords=&suginput="
return scrapy.http.FormRequest(
self.positionUrl,
headers=self.headers,
formdata={'pn': str(self.curPage),
'kd': self.keyword},
callback=self.parse)
|
# -*- coding: utf-8 -*-
"""\
Different output tests.
"""
from __future__ import unicode_literals, absolute_import
import io
from nose.tools import eq_
from segno_mimos import pyqrcode
try:
from .test_eps import eps_as_matrix
from .test_png import png_as_matrix
from .test_svg import svg_as_matrix
except (ValueError, SystemError): # Attempted relative import in non-package
from test_eps import eps_as_matrix
from test_png import png_as_matrix
from test_svg import svg_as_matrix
_DATA = (
# Input string, error level, quiet_zone
('Märchenbuch', 'M', 4),
(123, 'H', 0),
('http:/www.example.org/', 'L', 3),
('Hello\nWorld', 'Q', 2),
('HELLO WORLD', 'H', 2),
('Up Jumped The Devil', 'M', 4)
)
def test_data():
# Creates a QR code, serializes it and checks if the serialization
# corresponds to the initial QR code matrix.
def check(serializer_name, buffer_factory, to_matrix_func, data, error, quiet_zone):
"""\
`serializer_name`
Method name to serialize the QR code
`buffer_factory`
Callable to construct the buffer.
`to_matrix_func`
Function to convert the buffer back to a matrix.
`data`
The input to construct the QR code.
`error`
ECC level
`quiet_zone`
quiet_zone size.
"""
qr = pyqrcode.create(data, error=error)
out = buffer_factory()
meth = getattr(qr, serializer_name)
meth(out, quiet_zone=quiet_zone)
matrix = to_matrix_func(out, quiet_zone)
eq_(qr.code, matrix)
    # Segno's EPS implementation differs, so 'eps' is skipped here.
    for meth_name, buffer_factory, to_matrix_func in (
            # ('eps', io.StringIO, eps_as_matrix),
            ('png', io.BytesIO, png_as_matrix),
            ('svg', io.BytesIO, svg_as_matrix)):
for data, error, quiet_zone in _DATA:
yield check, meth_name, buffer_factory, to_matrix_func, data, error, quiet_zone
if __name__ == '__main__':
import nose
nose.core.runmodule()
|
from . import cv_processing
from .camera_opencv import Camera
import os
from flask import Flask, redirect, render_template, Response, send_from_directory
from .disc import d as discerd
from .blueprints.clocker.inout_blueprint import in_page
import psycopg2
app = Flask(__name__.split(".")[0])
app.register_blueprint(in_page, url_prefix="/tick_tock")
# return floats instead of Decimals
DEC2FLOAT = psycopg2.extensions.new_type(
psycopg2.extensions.DECIMAL.values,
"DEC2FLOAT",
lambda value, curs: float(value) if value is not None else None,
)
psycopg2.extensions.register_type(DEC2FLOAT)
@app.route("/")
def index():
return render_template("index.html")
def gen(c):
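    # Generator for an MJPEG stream: each processed frame is emitted as one part of a
    # multipart/x-mixed-replace response, delimited by the "--frame" boundary used below.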
while True:
d_frame = cv_processing.draw_debugs_jpegs(c.get_frame()[1])
yield (b"--frame\r\nContent-Type: image/jpeg\r\n\r\n" + d_frame + b"\r\n")
@app.route("/video_feed")
def feed():
"""Streaming route (img src)"""
return Response(gen(Camera()), mimetype="multipart/x-mixed-replace; boundary=frame")
@app.route("/favicon.ico")
def favicon():
return send_from_directory(os.path.join(app.root_path, "static"), "favicon.ico")
@app.template_filter()
def int_to_hexcolor(i) -> str:
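    # Jinja filter: 0 maps to white ("#FFFFFF"); any other value becomes "#" plus its
    # hex digits (note: no zero-padding to six digits).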
return "#" + (hex(i)[2:] if i != 0 else "FFFFFF")
|
from .followers import *
from .posts import *
from .authors import *
from .likes import *
from .inbox import *
from .comments import *
from .auth import *
|
import pkg_resources
import unittest
import gzip
import goenrich
class TestRead(unittest.TestCase):
def test_ontology(self):
G = goenrich.obo.ontology('db/go-basic.obo')
def test_ontology_from_file_obj(self):
with open('db/go-basic.obo') as f:
G = goenrich.obo.ontology(f)
self.assertFalse(f.closed)
def test_goa(self):
background = goenrich.read.goa('db/gene_association.goa_human.gaf.gz')
def test_goa_from_file_obj(self):
with gzip.GzipFile('db/gene_association.goa_human.gaf.gz') as f:
background = goenrich.read.goa(f)
self.assertFalse(f.closed)
def test_gene2go(self):
background = goenrich.read.gene2go('db/gene2go.gz')
def test_gene2go_from_file_obj(self):
with gzip.GzipFile('db/gene2go.gz') as f:
background = goenrich.read.gene2go(f)
self.assertFalse(f.closed)
def test_goslim_from_file(self):
G = goenrich.obo.ontology(pkg_resources.resource_filename(goenrich.__name__, 'tests/test_ontologies/goslim_generic.obo'))
self.assertEqual(len(G.nodes()), 150)
self.assertSetEqual(set(G.successors('GO:0009056')), set(['GO:0008150']))
self.assertSetEqual(set(G.predecessors('GO:0009056')), set(['GO:0034655', 'GO:0006914']))
if __name__ == '__main__':
unittest.main()
|
import pandas as pd
import numpy as np
import os
import re
# User Input
inputfile = input('What is the path of your input file (e.g. D:/FILES/ALS_DRINKING-ALGAE_WATER_201807101206.xlsx): ')
separator = input('What is the delimiter for the output cells (e.g. | or ,): ')
columntosplit = input('What is the heading of the column you want to split the data by?: ')
# Read File
df = pd.read_excel(inputfile)
df_column = df[columntosplit]
# The number of unique items in the column you chose
unique_values = df_column.unique()
numberofunique = len(unique_values)
print ('number of files to generate:', numberofunique)
# Create Files
for i in range(numberofunique):
bools = df[columntosplit] == unique_values[i]
df_temp = df[bools]
filetag = re.sub('[^A-Za-z0-9]+', '-', unique_values[i])
filename, file_extension = os.path.splitext(inputfile)
filename = filename + "_" + filetag + ".csv"
    df_temp.to_csv(filename, sep=separator, index=False)
|
#!/usr/bin/python
from beem import Hive, Steem
from beem.account import Account
from beem.amount import Amount
from beem.block import Block
from beem.nodelist import NodeList
import pandas as pd
def init():
data = {'exchange_name': [],
'account_name' : [],
'trade_date':[],
'buy_asset':[],
'sell_asset':[],
'buy_amount': [],
'sell_amount': [],
'exchange_order_id': [],
'fee':[],
'fee_asset': [],
'transaction_type': [],
'clarification': []}
return data
def add_trade(data, timestamp, amount_in, amount_out, data_account_name, clarification="", exchange_order_id=""):
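    """Append one 'trade' row to the column-oriented dict created by init()."""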
data["exchange_name"].append("generic")
data["account_name"].append(data_account_name)
data["trade_date"].append(timestamp)
data["exchange_order_id"].append(exchange_order_id)
data["fee"].append("")
data["fee_asset"].append("")
data["clarification"].append(clarification)
data["buy_asset"].append(amount_in.symbol)
data["buy_amount"].append(float(amount_in))
data["transaction_type"].append("trade")
data["sell_amount"].append(float(amount_out))
data["sell_asset"].append(amount_out.symbol)
return data
def add_deposit(data, timestamp, amount, data_account_name, clarification="", exchange_order_id=""):
data["exchange_name"].append("generic")
data["account_name"].append(data_account_name)
data["trade_date"].append(timestamp)
data["exchange_order_id"].append(exchange_order_id)
data["fee"].append("")
data["fee_asset"].append("")
data["clarification"].append(clarification)
data["buy_asset"].append(amount.symbol)
data["buy_amount"].append(float(amount))
data["transaction_type"].append("deposit")
data["sell_amount"].append("")
data["sell_asset"].append("")
return data
def add_withdrawal(data, timestamp, amount, data_account_name, clarification="", exchange_order_id=""):
data["exchange_name"].append("generic")
data["account_name"].append(data_account_name)
data["trade_date"].append(timestamp)
data["exchange_order_id"].append(exchange_order_id)
data["fee"].append("")
data["fee_asset"].append("")
data["clarification"].append(clarification)
data["buy_asset"].append("")
data["buy_amount"].append("")
data["transaction_type"].append("withdrawal")
data["sell_amount"].append(float(amount))
data["sell_asset"].append(amount.symbol)
return data
def store(filename, data, sheet_name='Sheet1'):
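    # Rename the SBD symbol to SBD2 in both asset columns, then write the collected
    # rows to an Excel sheet via pandas/xlsxwriter.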
sell_asset2 = []
buy_asset2 = []
for a in data["sell_asset"]:
if a == "SBD":
sell_asset2.append("SBD2")
else:
sell_asset2.append(a)
for a in data["buy_asset"]:
if a == "SBD":
buy_asset2.append("SBD2")
else:
buy_asset2.append(a)
data["buy_asset"] = buy_asset2
data["sell_asset"] = sell_asset2
df = pd.DataFrame(data)
writer = pd.ExcelWriter(filename, engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet_name, startrow=0, header=True, index=False)
writer.save()
if __name__ == "__main__":
nodelist = NodeList()
nodelist.update_nodes()
# stm = Steem(node=nodelist.get_steem_nodes())
stm = Hive(node=nodelist.get_hive_nodes())
print(stm)
account_name = "holger80"
data_account_name = "hive_holger80_powered_up"
symbol = "HIVE"
hive_fork_block = 41818753
has_fork = True
limit_to_year = True
current_year = 2020
xls_filename = "%s_%d.xlsx" % (data_account_name, current_year)
account = Account(account_name, blockchain_instance=stm)
ops_dict = {}
_ids = {}
for ops in account.history():
ops_dict[ops["index"]] = ops
if ops["_id"] in _ids:
_ids[ops["_id"]] += 1
else:
_ids[ops["_id"]] = 1
duplicate_indices = []
_id_list = []
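    # Detect duplicate history entries: operations sharing an "_id" are flagged as
    # duplicates unless the containing block really holds that many operations for the
    # same transaction id (virtual ops carry the all-zero trx_id).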
for _id in sorted(list(ops_dict.keys())):
ops = ops_dict[_id]
if _ids[ops["_id"]] == 1:
continue
if ops["_id"] not in _id_list:
_id_list.append(ops["_id"])
else:
trx_id = ops["trx_id"]
if trx_id == "0000000000000000000000000000000000000000":
duplicate_indices.append(ops["index"])
else:
block = Block(ops["block"], blockchain_instance=stm)
count_ops = 0
for t in block.transactions:
if t["transaction_id"] != trx_id:
continue
for o in t["operations"]:
count_ops += 1
if count_ops < _ids[ops["_id"]]:
duplicate_indices.append(ops["index"])
type_count = {}
for _id in sorted(list(ops_dict.keys())):
ops = ops_dict[_id]
if ops["type"] in type_count:
type_count[ops["type"]] += 1
else:
type_count[ops["type"]] = 1
symbol_amount = 0
backed_symbol_amount = 0
index = 0
hard_fork_reached = False
year_reached = False
next_year_reached = False
data = init()
print("duplicate indices %d" % len(duplicate_indices))
for _id in sorted(list(ops_dict.keys())):
ops = ops_dict[_id]
if _id in duplicate_indices:
continue
block = ops["block"]
timestamp = ops["timestamp"].replace("T", " ")
if limit_to_year and not year_reached and timestamp[:4] == str(current_year):
if has_fork and hard_fork_reached:
year_reached = True
elif has_fork:
year_reached = False
else:
year_reached = True
if year_reached and symbol_amount > 0:
amount = Amount(symbol_amount, symbol, blockchain_instance=stm)
data = add_deposit(data, "%d-01-01 00:00:00" % current_year, amount, data_account_name,
exchange_order_id="Virtual transfer to %d" % current_year)
if limit_to_year and not next_year_reached and timestamp[:4] == str(current_year + 1):
year_reached = True
            next_year_reached = True
if symbol_amount > 0:
amount = Amount(symbol_amount, symbol, blockchain_instance=stm)
data = add_withdrawal(data, "%d-01-01 00:00:00" % (current_year + 1), amount,
data_account_name, exchange_order_id="Virtual transfer to %d" % (current_year + 1))
elif limit_to_year and next_year_reached:
continue
if has_fork and block > hive_fork_block and not hard_fork_reached:
amount = Amount(symbol_amount, symbol, blockchain_instance=stm)
data = add_deposit(data, timestamp, amount, data_account_name, exchange_order_id="Hard fork")
hard_fork_reached = True
if limit_to_year and not year_reached and timestamp[:4] == str(current_year):
year_reached = True
if ops["type"] == "transfer_to_vesting":
amount = Amount(ops["amount"], blockchain_instance=stm)
if ops["to"] == account_name:
symbol_amount += float(amount)
index += 1
if has_fork and block < hive_fork_block:
continue
if limit_to_year and not year_reached:
continue
data = add_deposit(data, timestamp, amount, data_account_name, exchange_order_id="Power up")
elif ops["type"] == "fill_vesting_withdraw":
amount = Amount(ops["deposited"], blockchain_instance=stm)
if ops["from_account"] == account_name:
symbol_amount -= float(amount)
index += 1
if has_fork and block < hive_fork_block:
continue
if limit_to_year and not year_reached:
continue
if symbol_amount < 0:
data = add_deposit(data, timestamp, Amount(-symbol_amount, symbol, blockchain_instance=stm),
data_account_name, exchange_order_id="Staking reward", clarification="staking")
symbol_amount += (-round(symbol_amount, 3))
data = add_withdrawal(data, timestamp, amount, data_account_name, exchange_order_id="Powering down")
print("%d entries" % index)
print("%s - %.3f %s" % (timestamp, symbol_amount, symbol))
store(xls_filename, data)
|
from django.urls import path
from .views import CarView, RateView, PopularView, CarViewSet, CarsRatingsViewSet
from rest_framework.routers import DefaultRouter
app_name = "cars"
router = DefaultRouter()
router.register(r'cars', CarViewSet)
router.register(r'cars', CarsRatingsViewSet)
urlpatterns = router.urls
urlpatterns += [
path('cars/', CarView.as_view(), name="cars_list"),
path('cars/<int:id>/', CarView.as_view(), name='del_cars'),
path('rate/', RateView.as_view(), name="rate"),
path('popular/', PopularView.as_view(), name="popular")
]
|
import os
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
TELEGRAM_TOKEN = os.getenv('TELEGRAM_TOKEN')
CHAT_ID = os.getenv('CHAT_ID')
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-i6o5!@e5(!*exd6xpv7-+)1p!3=k_2#fb6xhsb&*3y*&z6vknt'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'crm',
'accounts',
'crispy_forms',
'django_cleanup',
'django_filters',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES_DIR = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = os.path.join(BASE_DIR, 'sent_emails')
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_URL = '/auth/login/'
LOGIN_REDIRECT_URL = 'dashboard'
LOGOUT_REDIRECT_URL = 'home'
USER = 'user'
REPAIR_SPECIALIST = 'repair'
SERVICE_SPECIALIST = 'service'
CONSULTANT_SPECIALIST = 'consultant'
ROLES = [
    (REPAIR_SPECIALIST, 'Repair specialist'),
    (SERVICE_SPECIALIST, 'Maintenance specialist'),
    (CONSULTANT_SPECIALIST, 'Consultant'),
    (USER, 'User without CRM access'),
]
REPAIR = 'repair'
SERVICE = 'service'
CONSULTATION = 'consultant'
TYPE = [
    (REPAIR, 'Repair request'),
    (SERVICE, 'Maintenance request'),
    (CONSULTATION, 'Consultation request'),
]
OPEN = 'open'
WORK = 'work'
CLOSE = 'close'
STATUS = [
    (OPEN, 'Open'),
    (WORK, 'In progress'),
    (CLOSE, 'Closed')
]
|
#coding=utf-8
import os
def rename():
    path=input("Please enter the directory path (e.g. D:\\\\Jason): ")
    name=input("Please enter the filename prefix: ")
    startNumber=input("Please enter the starting number: ")
    fileType=input("Please enter the file extension (e.g. .jpg, .txt): ")
    endSplit = input("Please enter the separator (e.g. name_01_ 01 _.ext): ")
    print("Generating filenames iterating from "+name+startNumber+fileType)
count=0
filelist=os.listdir(path)
for files in filelist:
Olddir=os.path.join(path,files)
if os.path.isdir(Olddir) or not files.endswith(fileType):
continue
Newdir=os.path.join(path,name+str(count+int(startNumber))+endSplit+fileType)
os.rename(Olddir,Newdir)
count+=1
print("一共修改了"+str(count)+"个文件")
if __name__ == '__main__':
rename()
|
import setuptools
VERSION = "20.6.0"
TITLE = "aragog"
DESCRIPTION = "A better python scraper."
URL = "https://www.cameroncairns.com/"
DOC = DESCRIPTION + " <" + URL + ">"
AUTHOR = "Cameron Cairns"
AUTHOR_EMAIL = "cameron@cameroncairns.com"
LICENSE = "Apache License 2.0"
COPYRIGHT = "Copyright (c) 2020 Cameron Cairns"
NAME = "aragog"
INSTALL_REQUIRES = []
PYTHON_REQUIRES = ">=3.6"
KEYWORDS = ["webscrape", "webcrawl", "crawl", "scrape"]
CLASSIFIERS = [
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
def get_description():
with open("README.rst", "r") as readme:
long_description = readme.read()
return long_description
if __name__ == "__main__":
setuptools.setup(
zip_safe=False,
long_description=get_description(),
long_description_content_type="text/x-rst",
packages=setuptools.find_packages(where="src"),
package_dir={"": "src"},
name=NAME,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=AUTHOR,
maintainer_email=AUTHOR_EMAIL,
keywords=KEYWORDS,
python_requires=PYTHON_REQUIRES,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
|
import requests
import pandas as pd
import ftplib
import io
import re
import json
try:
from requests_html import HTMLSession
except Exception:
print("""Warning - Certain functionality
requires requests_html, which is not installed.
Install using:
pip install requests_html
After installation, you may have to restart your Python session.""")
base_url = "https://query1.finance.yahoo.com/v8/finance/chart/"
def build_url(ticker, start_date = None, end_date = None, interval = "1d"):
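    # Assemble the Yahoo Finance chart-API endpoint and its query parameters; when no
    # start date is given, the fixed epoch timestamp 7223400 below is used as the lower bound.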
if end_date is None:
end_seconds = int(pd.Timestamp("now").timestamp())
else:
end_seconds = int(pd.Timestamp(end_date).timestamp())
if start_date is None:
start_seconds = 7223400
else:
start_seconds = int(pd.Timestamp(start_date).timestamp())
site = base_url + ticker
#"{}/v8/finance/chart/{}".format(self._base_url, self.ticker)
params = {"period1": start_seconds, "period2": end_seconds,
"interval": interval.lower(), "events": "div,splits"}
return site, params
def force_float(elt):
try:
return float(elt)
except:
return elt
def get_data(ticker, start_date = None, end_date = None, index_as_date = True,
interval = "1d"):
'''Downloads historical stock price data into a pandas data frame. Interval
must be "1d", "1wk", or "1mo" for daily, weekly, or monthly data.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
@param: interval = "1d"
'''
if interval not in ("1d", "1wk", "1mo"):
raise AssertionError("interval must be of of '1d', '1wk', or '1mo'")
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, interval)
resp = requests.get(site, params = params)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# get open / high / low / close data
frame = pd.DataFrame(data["chart"]["result"][0]["indicators"]["quote"][0])
# add in adjclose
frame["adjclose"] = data["chart"]["result"][0]["indicators"]["adjclose"][0]["adjclose"]
# get the date info
temp_time = data["chart"]["result"][0]["timestamp"]
frame.index = pd.to_datetime(temp_time, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
frame = frame[["open", "high", "low", "close", "adjclose", "volume"]]
frame['ticker'] = ticker.upper()
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
def tickers_sp500():
'''Downloads list of tickers currently listed in the S&P 500 '''
# get list of all S&P 500 stocks
sp500 = pd.read_html("https://en.wikipedia.org/wiki/List_of_S%26P_500_companies")[0]
sp_tickers = sorted(sp500.Symbol.tolist())
return sp_tickers
def tickers_nasdaq():
'''Downloads list of tickers currently listed in the NASDAQ'''
ftp = ftplib.FTP("ftp.nasdaqtrader.com")
ftp.login()
ftp.cwd("SymbolDirectory")
r = io.BytesIO()
ftp.retrbinary('RETR nasdaqlisted.txt', r.write)
info = r.getvalue().decode()
splits = info.split("|")
tickers = [x for x in splits if "\r\n" in x]
    tickers = [x.split("\r\n")[1] for x in tickers if "NASDAQ" not in x and x != "\r\n"]
tickers = [ticker for ticker in tickers if "File" not in ticker]
ftp.close()
return tickers
def tickers_other():
'''Downloads list of tickers currently listed in the "otherlisted.txt"
file on "ftp.nasdaqtrader.com" '''
ftp = ftplib.FTP("ftp.nasdaqtrader.com")
ftp.login()
ftp.cwd("SymbolDirectory")
r = io.BytesIO()
ftp.retrbinary('RETR otherlisted.txt', r.write)
info = r.getvalue().decode()
splits = info.split("|")
tickers = [x for x in splits if "\r\n" in x]
tickers = [x.split("\r\n")[1] for x in tickers]
tickers = [ticker for ticker in tickers if "File" not in ticker]
ftp.close()
return tickers
def tickers_dow():
'''Downloads list of currently traded tickers on the Dow'''
site = "https://finance.yahoo.com/quote/%5EDJI/components?p=%5EDJI"
table = pd.read_html(site)[0]
dow_tickers = sorted(table['Symbol'].tolist())
return dow_tickers
def tickers_ibovespa():
'''Downloads list of currently traded tickers on the Ibovespa, Brazil'''
ibovespa_tickers = pd.read_html("https://pt.wikipedia.org/wiki/Lista_de_companhias_citadas_no_Ibovespa")[0]
ibovespa_tickers.columns = ["Symbol", "Share", "Sector", "Type", "Site"]
ibovespa_tickers = sorted(ibovespa_tickers.Symbol.tolist())
return ibovespa_tickers
def get_quote_table(ticker , dict_result = True):
'''Scrapes data elements found on Yahoo Finance's quote page
of input ticker
@param: ticker
@param: dict_result = True
'''
site = "https://finance.yahoo.com/quote/" + ticker + "?p=" + ticker
tables = pd.read_html(site)
data = tables[0].append(tables[1])
data.columns = ["attribute" , "value"]
price_etc = [elt for elt in tables if elt.iloc[0][0] == "Previous Close"][0]
price_etc.columns = data.columns.copy()
data = data.append(price_etc)
quote_price = pd.DataFrame(["Quote Price", get_live_price(ticker)]).transpose()
quote_price.columns = data.columns.copy()
data = data.append(quote_price)
data = data.sort_values("attribute")
data = data.drop_duplicates().reset_index(drop = True)
data["value"] = data.value.map(force_float)
if dict_result:
result = {key : val for key,val in zip(data.attribute , data.value)}
return result
return data
def get_stats(ticker):
'''Scrapes information from the statistics tab on Yahoo Finance
for an input ticker
@param: ticker
'''
stats_site = "https://finance.yahoo.com/quote/" + ticker + \
"/key-statistics?p=" + ticker
tables = pd.read_html(stats_site)
tables = [table for table in tables[1:] if table.shape[1] == 2]
table = tables[0]
for elt in tables[1:]:
table = table.append(elt)
table.columns = ["Attribute" , "Value"]
table = table.reset_index(drop = True)
return table
def get_stats_valuation(ticker):
'''Scrapes Valuation Measures table from the statistics tab on Yahoo Finance
for an input ticker
@param: ticker
'''
stats_site = "https://finance.yahoo.com/quote/" + ticker + \
"/key-statistics?p=" + ticker
tables = pd.read_html(stats_site)
tables = [table for table in tables if "Trailing P/E" in table.iloc[:,0].tolist()]
table = tables[0].reset_index(drop = True)
return table
def _parse_json(url):
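    # Extract the JSON blob that the page assigns to 'root.App.main', then strip the
    # {'raw': ..., 'fmt': ...} wrappers down to their raw values so the result parses
    # as plain JSON.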
html = requests.get(url=url).text
json_str = html.split('root.App.main =')[1].split(
'(this)')[0].split(';\n}')[0].strip()
data = json.loads(json_str)[
'context']['dispatcher']['stores']['QuoteSummaryStore']
# return data
new_data = json.dumps(data).replace('{}', 'null')
new_data = re.sub(r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
json_info = json.loads(new_data)
return json_info
def _parse_table(json_info):
df = pd.DataFrame(json_info)
del df["maxAge"]
df.set_index("endDate", inplace=True)
df.index = pd.to_datetime(df.index, unit="s")
df = df.transpose()
df.index.name = "Breakdown"
return df
def get_income_statement(ticker, yearly = True):
'''Scrape income statement from Yahoo Finance for a given ticker
@param: ticker
'''
income_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(income_site)
if yearly:
temp = json_info["incomeStatementHistory"]["incomeStatementHistory"]
else:
temp = json_info["incomeStatementHistoryQuarterly"]["incomeStatementHistory"]
return _parse_table(temp)
def get_balance_sheet(ticker, yearly = True):
'''Scrapes balance sheet from Yahoo Finance for an input ticker
@param: ticker
'''
balance_sheet_site = "https://finance.yahoo.com/quote/" + ticker + \
"/balance-sheet?p=" + ticker
json_info = _parse_json(balance_sheet_site)
if yearly:
temp = json_info["balanceSheetHistory"]["balanceSheetStatements"]
else:
temp = json_info["balanceSheetHistoryQuarterly"]["balanceSheetStatements"]
return _parse_table(temp)
def get_cash_flow(ticker, yearly = True):
'''Scrapes the cash flow statement from Yahoo Finance for an input ticker
@param: ticker
'''
cash_flow_site = "https://finance.yahoo.com/quote/" + \
ticker + "/cash-flow?p=" + ticker
json_info = _parse_json(cash_flow_site)
if yearly:
temp = json_info["cashflowStatementHistory"]["cashflowStatements"]
else:
temp = json_info["cashflowStatementHistoryQuarterly"]["cashflowStatements"]
return _parse_table(temp)
def get_financials(ticker, yearly = True, quarterly = True):
'''Scrapes financials data from Yahoo Finance for an input ticker, including
balance sheet, cash flow statement, and income statement. Returns dictionary
of results.
@param: ticker
@param: yearly = True
@param: quarterly = True
'''
if not yearly and not quarterly:
raise AssertionError("yearly or quarterly must be True")
financials_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(financials_site)
result = {}
if yearly:
temp = json_info["incomeStatementHistory"]["incomeStatementHistory"]
table = _parse_table(temp)
result["yearly_income_statement"] = table
temp = json_info["balanceSheetHistory"]["balanceSheetStatements"]
table = _parse_table(temp)
result["yearly_balance_sheet"] = table
temp = json_info["cashflowStatementHistory"]["cashflowStatements"]
table = _parse_table(temp)
result["yearly_cash_flow"] = table
if quarterly:
temp = json_info["incomeStatementHistoryQuarterly"]["incomeStatementHistory"]
table = _parse_table(temp)
result["quarterly_income_statement"] = table
temp = json_info["balanceSheetHistoryQuarterly"]["balanceSheetStatements"]
table = _parse_table(temp)
result["quarterly_balance_sheet"] = table
temp = json_info["cashflowStatementHistoryQuarterly"]["cashflowStatements"]
table = _parse_table(temp)
result["quarterly_cash_flow"] = table
return result
def get_holders(ticker):
'''Scrapes the Holders page from Yahoo Finance for an input ticker
@param: ticker
'''
holders_site = "https://finance.yahoo.com/quote/" + \
ticker + "/holders?p=" + ticker
tables = pd.read_html(holders_site , header = 0)
table_names = ["Major Holders" , "Direct Holders (Forms 3 and 4)" ,
"Top Institutional Holders" , "Top Mutual Fund Holders"]
table_mapper = {key : val for key,val in zip(table_names , tables)}
return table_mapper
def get_analysts_info(ticker):
'''Scrapes the Analysts page from Yahoo Finance for an input ticker
@param: ticker
'''
analysts_site = "https://finance.yahoo.com/quote/" + ticker + \
"/analysts?p=" + ticker
tables = pd.read_html(analysts_site , header = 0)
table_names = [table.columns[0] for table in tables]
table_mapper = {key : val for key , val in zip(table_names , tables)}
return table_mapper
def get_live_price(ticker):
'''Gets the live price of input ticker
@param: ticker
'''
df = get_data(ticker, end_date = pd.Timestamp.today() + pd.DateOffset(10))
return df.close[-1]
def _raw_get_daily_info(site):
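    # Render the page with requests_html, read the screener table with pandas, and
    # normalize "% Change" plus any Market Cap / volume columns carrying "B"/"M"
    # suffixes into plain floats.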
session = HTMLSession()
resp = session.get(site)
tables = pd.read_html(resp.html.raw_html)
df = tables[0].copy()
df.columns = tables[0].columns
del df["52 Week Range"]
df["% Change"] = df["% Change"].map(lambda x: float(x.strip("%+").replace(",", "")))
fields_to_change = [x for x in df.columns.tolist() if "Vol" in x \
or x == "Market Cap"]
for field in fields_to_change:
if type(df[field][0]) == str:
df[field] = df[field].str.strip("B").map(force_float)
df[field] = df[field].map(lambda x: x if type(x) == str
else x * 1000000000)
df[field] = df[field].map(lambda x: x if type(x) == float else
force_float(x.strip("M")) * 1000000)
session.close()
return df
def get_day_most_active():
return _raw_get_daily_info("https://finance.yahoo.com/most-active?offset=0&count=100")
def get_day_gainers():
return _raw_get_daily_info("https://finance.yahoo.com/gainers?offset=0&count=100")
def get_day_losers():
return _raw_get_daily_info("https://finance.yahoo.com/losers?offset=0&count=100")
def get_top_crypto():
'''Gets the top 100 Cryptocurrencies by Market Cap'''
session = HTMLSession()
resp = session.get("https://finance.yahoo.com/cryptocurrencies?offset=0&count=100")
tables = pd.read_html(resp.html.raw_html)
df = tables[0].copy()
df["% Change"] = df["% Change"].map(lambda x: float(x.strip("%").\
strip("+").\
replace(",", "")))
del df["52 Week Range"]
del df["1 Day Chart"]
fields_to_change = [x for x in df.columns.tolist() if "Volume" in x \
or x == "Market Cap" or x == "Circulating Supply"]
for field in fields_to_change:
if type(df[field][0]) == str:
df[field] = df[field].str.strip("B").map(force_float)
df[field] = df[field].map(lambda x: x if type(x) == str
else x * 1000000000)
df[field] = df[field].map(lambda x: x if type(x) == float else
force_float(x.strip("M")) * 1000000)
session.close()
return df
def get_dividends(ticker, start_date = None, end_date = None, index_as_date = True):
'''Downloads historical dividend data into a pandas data frame.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
'''
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, "1d")
resp = requests.get(site, params = params)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# check if there is data available for dividends
if "dividends" not in data["chart"]["result"][0]['events']:
raise AssertionError("There is no data available on dividends, or none have been granted")
# get the dividend data
frame = pd.DataFrame(data["chart"]["result"][0]['events']['dividends'])
frame = frame.transpose()
frame.index = pd.to_datetime(frame.index, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
# sort in to chronological order
frame = frame.sort_index()
frame['ticker'] = ticker.upper()
# remove old date column
frame = frame.drop(columns='date')
frame = frame.rename({'amount': 'dividend'}, axis = 'columns')
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
def get_splits(ticker, start_date = None, end_date = None, index_as_date = True):
'''Downloads historical stock split data into a pandas data frame.
@param: ticker
@param: start_date = None
@param: end_date = None
@param: index_as_date = True
'''
# build and connect to URL
site, params = build_url(ticker, start_date, end_date, "1d")
resp = requests.get(site, params = params)
if not resp.ok:
raise AssertionError(resp.json())
# get JSON response
data = resp.json()
# check if there is data available for splits
if "splits" not in data["chart"]["result"][0]['events']:
raise AssertionError("There is no data available on stock splits, or none have occured")
# get the split data
frame = pd.DataFrame(data["chart"]["result"][0]['events']['splits'])
frame = frame.transpose()
frame.index = pd.to_datetime(frame.index, unit = "s")
frame.index = frame.index.map(lambda dt: dt.floor("d"))
# sort in to chronological order
frame = frame.sort_index()
frame['ticker'] = ticker.upper()
# remove unnecessary columns
frame = frame.drop(columns=['date', 'denominator', 'numerator'])
if not index_as_date:
frame = frame.reset_index()
frame.rename(columns = {"index": "date"}, inplace = True)
return frame
def get_earnings(ticker):
'''Scrapes earnings data from Yahoo Finance for an input ticker
@param: ticker
'''
financials_site = "https://finance.yahoo.com/quote/" + ticker + \
"/financials?p=" + ticker
json_info = _parse_json(financials_site)
temp = json_info["earnings"]
result = {}
result["quarterly_results"] = pd.DataFrame.from_dict(temp["earningsChart"]["quarterly"])
result["yearly_revenue_earnings"] = pd.DataFrame.from_dict(temp["financialsChart"]["yearly"])
result["quarterly_revenue_earnings"] = pd.DataFrame.from_dict(temp["financialsChart"]["quarterly"])
return result
|
import pygame
import math
import random
from pygame.locals import *
def main():
"""Main game execution
"""
game_init() # initializing game
load_resources() # loading game resources
game_loop() # looping through game
def game_init():
"""Initializing game
"""
# initializing global variables
global screen, width, height, keys, playerpos, accuracy, arrows
global badtimer,badtimer1, badguys, healthvalue
# initializing game and game-related variables
pygame.init()
width, height = 640, 480 # screen width and height
keys = [False, False, False, False] # game keys (WASD)
playerpos=[100,100] # player position
accuracy =[0,0] # player's accuracy
arrows = [] # arrows
badtimer=100 # timer to decrease for bad guys to appear
badtimer1=0 # timer to increase for bad guys to appear/disappear
    badguys=[[640,100]] # bad guys' initial position
healthvalue=194 # health value
screen = pygame.display.set_mode((width, height))
def load_resources():
"""Loading game resources
"""
# initializing global variables
global player, grass, castle, arrow, gameover
global badguyimg, badguyimg1, healthbar, health, youwin
global shoot, hit, enemy
# initializing mixer
pygame.mixer.init()
# loading resources
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
badguyimg = badguyimg1
# setting up music
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
def draw_grass():
"""Drawing grass to the screen
"""
# referencing global variables
global width, height, grass, screen
# iterating over width/grass_width
    for x in range(width//grass.get_width() + 1):
        # iterating over height/grass_height
        for y in range(height//grass.get_height()+1):
# drawing grass on screen
screen.blit(grass,(x*100,y*100))
def draw_castle():
"""Drawing castle
"""
# referencing global variable(s)
global castle, screen
y_castle = 30
# drawing castle(s) on the screen
for x in range(4):
screen.blit(castle, (0,y_castle))
y_castle += 105
def draw_player():
"""Drawing player with z rotation
"""
# referencing global variables
global player, playerpos, playerpos1
    # calculating z rotation value
position = pygame.mouse.get_pos() # getting mouse position
# calculating angle between mouse and player tan(angle) = (y2-y1)/(x2-x1)
# angle = arctan((y2-y1)/(x2-x1))
# angle is in radians
angle = math.atan2(
position[1]-(playerpos[1]+32),
position[0]-(playerpos[0]+26)
)
    angle_degrees = 360-angle*57.29
    # player rotation
    playerrot = pygame.transform.rotate(player, angle_degrees)
# player new position
playerpos1 = (
playerpos[0]-playerrot.get_rect().width/2,
playerpos[1]-playerrot.get_rect().height/2)
# drawing player on the screen
screen.blit(playerrot, playerpos1)
def draw_arrows():
"""Drawing the arrows fired by the player
"""
# referencing global variables
global arrow, arrows
# updating arrows position with velocity components
    index=0
    for bullet in arrows:
        # velocity vector components:
        # x-component: cos(angle) * acceleration
        # y-component: sin(angle) * acceleration
        velx=math.cos(bullet[0])*10 # x-component of the velocity vector
        vely=math.sin(bullet[0])*10 # y-component of the velocity vector
        # adding velocities to the arrow's position components
        bullet[1]+=velx
        bullet[2]+=vely
        # removing the arrow once it leaves the screen
        if bullet[1]<-64 or bullet[1]>640 or bullet[2]<-64 or bullet[2]>480:
            arrows.pop(index)
        index+=1
# drawing arrows on screen
for projectile in arrows:
arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
def draw_bad_guys():
"""Drawing bad guys
"""
# referencing global variables
global badtimer, badtimer1, badguys, badguyimg
global healthvalue, accuracy, arrows, hit, enemy
    # check if it's time to add a new bad guy to the screen
    if badtimer == 0:
        # it's time to add a new bad guy
# adding a bad guy from any y-coordinate from the right of the screen
# with boundaries
badguys.append([640, random.randint(50,430)])
# reduce time for bad guys to appear
badtimer=100-(badtimer1*2)
# check for another timer
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for badguy in badguys:
# remove bad guys if they went off-screen
if badguy[0]<-64:
badguys.pop(index)
# reduce bad guys x-position (move to the left)
badguy[0]-=5 # use this variable to modify bad guys speed
# blowing up castle
badrect=pygame.Rect(badguyimg.get_rect())
badrect.top=badguy[1]
badrect.left=badguy[0]
if badrect.left<64:
# hit castle sound
hit.play()
healthvalue -= random.randint(5,20)
badguys.pop(index)
# keeping track of current arrow
index1=0
# checking for collision between bad guys and arrows
for bullet in arrows:
bullrect=pygame.Rect(arrow.get_rect()) # arrow rect
            bullrect.left=bullet[1] # arrow x position
            bullrect.top=bullet[2] # arrow y position
# checking for collision between arrow and badguy
if badrect.colliderect(bullrect):
# enemy sound
enemy.play()
                # a collision happened, count the hit for the accuracy stat
accuracy[0]+=1
# removing bad guy and arrow from screen
badguys.pop(index)
arrows.pop(index1)
index1+=1
# keeping track of current bad guy
index+=1
# drawing bad guys
for badguy in badguys:
screen.blit(badguyimg, badguy)
def draw_clock():
"""Drawing a timer
"""
# creating a font with size
font = pygame.font.Font(None, 24)
# rendering a text containing the current time
survivedtext = font.render(
        (str((90000-pygame.time.get_ticks())//60000)+
        ":"+str((90000-pygame.time.get_ticks())//1000%60).zfill(2)),
True,(0,0,0))
# retrieving rect for text
textRect = survivedtext.get_rect()
# positioning text on top right corner
textRect.topright=[635,5]
# drawing text onto the screen
screen.blit(survivedtext, textRect)
def draw_health():
"""Drawing health bar
"""
# referencing global variables
global healthbar, health, healthvalue
# drawing health bar
screen.blit(healthbar, (5,5))
for health1 in range(healthvalue):
# according to how much value left, draw health
screen.blit(health, (health1+8,8))
def check_for_end():
"""Checking for the end of game
"""
# referencing global variables
global running, exitcode, accuracy, gameover, accuracy_str
# check if game needs to end
if pygame.time.get_ticks()>=90000:
# time has elapsed
running=0
exitcode=1
if healthvalue<=0:
# player health is gone
running=0
exitcode=0
if accuracy[1]!=0:
accuracy_str=accuracy[0]*1.0/accuracy[1]*100
else:
accuracy_str=0
def end_game():
"""Ending game
"""
# referencing global variables
global accuracy_str, gameover, youwin
# check if player won/lost
if exitcode==0:
# player lost
pygame.font.init()
font = pygame.font.Font(None, 24) # creating font
# rendering text
text = font.render("Accuracy: "+str(accuracy_str)+"%", True, (255,0,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(gameover, (0,0))
screen.blit(text, textRect) # adding text to screen
else:
# player won
pygame.font.init()
font = pygame.font.Font(None, 24) # creating font
# rendering text
text = font.render("Accuracy: "+str(accuracy_str)+"%", True, (0,255,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(youwin, (0,0))
screen.blit(text, textRect) # adding text to screen
pygame.display.flip()
# giving user the ability to quit game
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
def game_events():
"""Checking for game events
"""
# referencing global variables
global keys, playerpos, accuracy, arrows, playerpos1, shoot
# loop through events
for event in pygame.event.get():
# check if the event is the X button
if event.type == pygame.QUIT:
# if it is, quit the game
pygame.quit()
exit(0)
# checking for key down keyboard events
if event.type == pygame.KEYDOWN:
if event.key == K_w: # 'w' key was pressed down
keys[0] = True
if event.key == K_a: # 'a' key was pressed down
keys[1] = True
if event.key == K_s: # 's' key was pressed down
keys[2] = True
if event.key == K_d: # 'd' key was pressed down
keys[3] = True
# checking for key up keyboard events
if event.type == pygame.KEYUP:
if event.key == K_w: # 'w' key was pressed up
keys[0] = False
if event.key == K_a: # 'a' key was pressed up
keys[1] = False
if event.key == K_s: # 's' key was pressed up
keys[2] = False
if event.key == K_d: # 'd' key was pressed up
keys[3] = False
# checking if mouse was clicked AKA an arrow was fired!
if event.type == pygame.MOUSEBUTTONDOWN:
# shoot sound
shoot.play()
position = pygame.mouse.get_pos() # mouse position
            accuracy[1]+=1 # count a fired arrow (shots taken)
# calculating the arrow rotation based on the rotated player
# position and the cursor position.
# This rotation value is stored in the arrows array.
# arrow = (angle, x, y)
arrows.append(
[math.atan2(
position[1]-(playerpos1[1]+32),
position[0]-(playerpos1[0]+26)),
playerpos1[0]+32,playerpos1[1]+32])
# updating player position based on which key was pressed
# AKA moving player
if keys[0]:
playerpos[1]-=5
elif keys[2]:
playerpos[1]+=5
if keys[1]:
playerpos[0]-=5
elif keys[3]:
playerpos[0]+=5
def game_loop():
"""Infinite game loop
"""
# referencing global variables
global screen, badtimer
# initializing global variables
global running, exitcode
    running = 1 # set to 0 when the game should stop
    exitcode = 0 # set to 1 if the player wins, 0 if the player loses
# keeping looping through game
while running:
# clear screen before drawing it again
screen.fill(0)
draw_grass() # drawing grass
draw_castle() # drawing castle(s)
draw_player() # drawing player
draw_arrows() # drawing arrows
draw_bad_guys() # drawing bad guys
draw_clock() # drawing a clock
draw_health() # drawing health!
pygame.display.flip() # update the screen
game_events() # loading game events
# updating bad time for guys to appear
badtimer-=1
# checking for end game
check_for_end()
# ending game
end_game()
if __name__ == "__main__":
main()
|
from .views import IndexView
from sanic_router import Include, Url
routes = (
Url('', IndexView.as_view(), name='index'),
Url('schema/', Include('tests.example.schema.routes', namespace='schema')),
)
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import protos.cisco_mdt_dial_in_pb2 as cisco__mdt__dial__in__pb2
class gRPCConfigOperStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetConfig = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/GetConfig",
request_serializer=cisco__mdt__dial__in__pb2.ConfigGetArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ConfigGetReply.FromString,
)
self.MergeConfig = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/MergeConfig",
request_serializer=cisco__mdt__dial__in__pb2.ConfigArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ConfigReply.FromString,
)
self.DeleteConfig = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/DeleteConfig",
request_serializer=cisco__mdt__dial__in__pb2.ConfigArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ConfigReply.FromString,
)
self.ReplaceConfig = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/ReplaceConfig",
request_serializer=cisco__mdt__dial__in__pb2.ConfigArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ConfigReply.FromString,
)
self.CliConfig = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/CliConfig",
request_serializer=cisco__mdt__dial__in__pb2.CliConfigArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.CliConfigReply.FromString,
)
self.CommitReplace = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/CommitReplace",
request_serializer=cisco__mdt__dial__in__pb2.CommitReplaceArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.CommitReplaceReply.FromString,
)
self.CommitConfig = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/CommitConfig",
request_serializer=cisco__mdt__dial__in__pb2.CommitArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.CommitReply.FromString,
)
self.ConfigDiscardChanges = channel.unary_unary(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/ConfigDiscardChanges",
request_serializer=cisco__mdt__dial__in__pb2.DiscardChangesArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.DiscardChangesReply.FromString,
)
self.GetOper = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/GetOper",
request_serializer=cisco__mdt__dial__in__pb2.GetOperArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.GetOperReply.FromString,
)
self.CreateSubs = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/CreateSubs",
request_serializer=cisco__mdt__dial__in__pb2.CreateSubsArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.CreateSubsReply.FromString,
)
self.GetProtoFile = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCConfigOper/GetProtoFile",
request_serializer=cisco__mdt__dial__in__pb2.GetProtoFileArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.GetProtoFileReply.FromString,
)
class gRPCConfigOperServicer(object):
# missing associated documentation comment in .proto file
pass
def GetConfig(self, request, context):
"""Configuration related commands
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def MergeConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ReplaceConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CliConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CommitReplace(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CommitConfig(self, request, context):
"""Do we need implicit or explicit commit
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ConfigDiscardChanges(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetOper(self, request, context):
"""Get only returns oper data
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateSubs(self, request, context):
"""Get Telemetry Data
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetProtoFile(self, request, context):
"""Get Proto File
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_gRPCConfigOperServicer_to_server(servicer, server):
rpc_method_handlers = {
"GetConfig": grpc.unary_stream_rpc_method_handler(
servicer.GetConfig,
request_deserializer=cisco__mdt__dial__in__pb2.ConfigGetArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ConfigGetReply.SerializeToString,
),
"MergeConfig": grpc.unary_unary_rpc_method_handler(
servicer.MergeConfig,
request_deserializer=cisco__mdt__dial__in__pb2.ConfigArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ConfigReply.SerializeToString,
),
"DeleteConfig": grpc.unary_unary_rpc_method_handler(
servicer.DeleteConfig,
request_deserializer=cisco__mdt__dial__in__pb2.ConfigArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ConfigReply.SerializeToString,
),
"ReplaceConfig": grpc.unary_unary_rpc_method_handler(
servicer.ReplaceConfig,
request_deserializer=cisco__mdt__dial__in__pb2.ConfigArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ConfigReply.SerializeToString,
),
"CliConfig": grpc.unary_unary_rpc_method_handler(
servicer.CliConfig,
request_deserializer=cisco__mdt__dial__in__pb2.CliConfigArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.CliConfigReply.SerializeToString,
),
"CommitReplace": grpc.unary_unary_rpc_method_handler(
servicer.CommitReplace,
request_deserializer=cisco__mdt__dial__in__pb2.CommitReplaceArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.CommitReplaceReply.SerializeToString,
),
"CommitConfig": grpc.unary_unary_rpc_method_handler(
servicer.CommitConfig,
request_deserializer=cisco__mdt__dial__in__pb2.CommitArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.CommitReply.SerializeToString,
),
"ConfigDiscardChanges": grpc.unary_unary_rpc_method_handler(
servicer.ConfigDiscardChanges,
request_deserializer=cisco__mdt__dial__in__pb2.DiscardChangesArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.DiscardChangesReply.SerializeToString,
),
"GetOper": grpc.unary_stream_rpc_method_handler(
servicer.GetOper,
request_deserializer=cisco__mdt__dial__in__pb2.GetOperArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.GetOperReply.SerializeToString,
),
"CreateSubs": grpc.unary_stream_rpc_method_handler(
servicer.CreateSubs,
request_deserializer=cisco__mdt__dial__in__pb2.CreateSubsArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.CreateSubsReply.SerializeToString,
),
"GetProtoFile": grpc.unary_stream_rpc_method_handler(
servicer.GetProtoFile,
request_deserializer=cisco__mdt__dial__in__pb2.GetProtoFileArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.GetProtoFileReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"IOSXRExtensibleManagabilityService.gRPCConfigOper", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
class gRPCExecStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ShowCmdTextOutput = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCExec/ShowCmdTextOutput",
request_serializer=cisco__mdt__dial__in__pb2.ShowCmdArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ShowCmdTextReply.FromString,
)
self.ShowCmdJSONOutput = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCExec/ShowCmdJSONOutput",
request_serializer=cisco__mdt__dial__in__pb2.ShowCmdArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ShowCmdJSONReply.FromString,
)
self.ActionJSON = channel.unary_stream(
"/IOSXRExtensibleManagabilityService.gRPCExec/ActionJSON",
request_serializer=cisco__mdt__dial__in__pb2.ActionJSONArgs.SerializeToString,
response_deserializer=cisco__mdt__dial__in__pb2.ActionJSONReply.FromString,
)
class gRPCExecServicer(object):
# missing associated documentation comment in .proto file
pass
def ShowCmdTextOutput(self, request, context):
"""Exec commands
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ShowCmdJSONOutput(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ActionJSON(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_gRPCExecServicer_to_server(servicer, server):
rpc_method_handlers = {
"ShowCmdTextOutput": grpc.unary_stream_rpc_method_handler(
servicer.ShowCmdTextOutput,
request_deserializer=cisco__mdt__dial__in__pb2.ShowCmdArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ShowCmdTextReply.SerializeToString,
),
"ShowCmdJSONOutput": grpc.unary_stream_rpc_method_handler(
servicer.ShowCmdJSONOutput,
request_deserializer=cisco__mdt__dial__in__pb2.ShowCmdArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ShowCmdJSONReply.SerializeToString,
),
"ActionJSON": grpc.unary_stream_rpc_method_handler(
servicer.ActionJSON,
request_deserializer=cisco__mdt__dial__in__pb2.ActionJSONArgs.FromString,
response_serializer=cisco__mdt__dial__in__pb2.ActionJSONReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"IOSXRExtensibleManagabilityService.gRPCExec", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
|
import numpy as np
import collections
from operator import itemgetter
np.set_printoptions(linewidth=1024, edgeitems=1000)
delta = {'N': (-1, 0), 'W': (0, -1), 'E': (0, 1), 'S': (1, 0)}
inv_delta = {v: k for k, v in delta.items()}
def distance(p1, p2):
return np.array(p2) - np.array(p1)
def manhattan_distance(p1, p2):
return sum(np.abs(distance(p1, p2)))
def build_graph(board):
# Build the list of neighbors for each square in the grid. this is our graph.
graph = {}
for i in range(board.shape[0]):
for j in range(board.shape[1]):
if board[i, j] == 0:
graph[i, j] = []
for direction in 'N', 'E', 'S', 'W':
row, col = i + delta[direction][0], j + delta[direction][1]
if board[row, col] == 0:
graph[i, j].append((row, col))
return graph
def get_squares_in_range(allies, enemies, graph):
"""
returns the positions of the squares that are within range of the enemy units
"""
squares = []
occupied = get_occupied_squares(allies, enemies)
for e in enemies:
if not e.alive:
continue
adjacent = graph[e.pos]
squares.extend([sq for sq in adjacent if sq not in occupied])
return squares
def get_occupied_squares(allies, enemies):
occupied = set()
for unit in allies + enemies:
if unit.alive:
occupied.add(unit.pos)
return occupied
def get_num_moves(pos, board, allies, enemies, graph):
    """
    Returns a np array of the board where each element holds
    the number of moves required to get from pos to that element
    (-1 for squares that cannot be reached).
    """
    # Setup the visited set and queue for the BFS
    visited, queue = set(), collections.deque([(0, pos)])
    grid = np.zeros_like(board) - 1
    visited.add(pos)
    # Setup a set of spaces occupied by units
    occupied = get_occupied_squares(allies, enemies)
    # Perform the BFS to find the number of moves required to
    # get to every accessible point on the grid.
    while queue:
        dist, p = queue.popleft()
        grid[p] = dist
        for neighbor in graph[p]:
            if neighbor not in visited and neighbor not in occupied:
                queue.append((dist + 1, neighbor))
                visited.add(neighbor)
    return grid
def get_cost_map(pos, allies, enemies, graph):
# Setup the visited set and queue for the BFS
visited, queue = set(), collections.deque([(0, pos)])
visited.add(pos)
cost_map = {}
# Setup a set of spaces occupied by units
occupied = get_occupied_squares(allies, enemies)
# Perform the BFS to find the number of moves required to
# get to every accessible point on the grid.
while queue:
distance, p = queue.popleft()
cost_map[p] = distance
for neighbor in graph[p]:
if neighbor not in visited and neighbor in graph and neighbor not in occupied:
queue.append((distance+1, neighbor))
visited.add(neighbor)
return cost_map
def get_shortest_path(start, end, allies, enemies, graph):
    """
    Returns the shortest path from start to end (both included) as a list of
    squares, using a BFS that avoids squares occupied by units.
    Returns an empty list if end cannot be reached.
    """
    # Setup the visited set and queue for the BFS
    visited, queue = set(), collections.deque([start])
    visited.add(start)
    # Setup a set of spaces occupied by units
    occupied = get_occupied_squares(allies, enemies)
    # Perform the BFS to get the shortest path
    prev = {start: None}
    while queue:
        loc = queue.popleft()
        for neighbor in graph[loc]:
            if neighbor == end:
                # Found the end, log the previous location and stop searching
                prev[neighbor] = loc
                queue.clear()
                break
            if neighbor not in visited and neighbor not in occupied:
                queue.append(neighbor)
                prev[neighbor] = loc
                visited.add(neighbor)
    if end not in prev:
        # The end square is unreachable
        return []
    # Reconstruct the path by walking back from end to start
    path = []
    at = end
    while at is not None:
        path.append(at)
        at = prev[at]
    path.reverse()
    return path
def print_board(turn, board, elves, goblins):
g = np.empty(board.shape, dtype=str)
g[:, :] = '.'
for i in range(board.shape[0]):
for j in range(board.shape[1]):
if board[i, j] == 1:
g[i, j] = '#'
for elf in elves:
g[elf.pos[0], elf.pos[1]] = 'E'
for gob in goblins:
g[gob.pos[0], gob.pos[1]] = 'G'
print()
for row in range(board.shape[0]):
print(''.join(g[row, :]))
print()
def print_cost_map(turn, board, elves, goblins, cost_map):
g = np.empty(board.shape, dtype=str)
g[:, :] = '.'
for i in range(board.shape[0]):
for j in range(board.shape[1]):
if board[i, j] == 1:
g[i, j] = '#'
for elf in elves:
g[elf.pos[0], elf.pos[1]] = 'E'
for gob in goblins:
g[gob.pos[0], gob.pos[1]] = 'G'
for key, val in cost_map.items():
g[key] = str(val)
print()
for row in range(board.shape[0]):
print(''.join(g[row, :]))
print()
class Unit(object):
def __init__(self, pos, attack_power=3):
self.pos = pos
self.hit_points = 200
self.attack_power = attack_power
self.alive = True
def __repr__(self):
return '{0:6s}: pos:{1:10s} hp:{2:03d} {3}'.format(type(self).__name__, str(self.pos), self.hit_points, ' ' if self.alive else 'X')
def get_attack_options(self, enemies):
attack_directions = []
for t in enemies:
if not t.alive:
continue
if tuple(distance(self.pos, t.pos)) in delta.values():
attack_directions.append(inv_delta[tuple(distance(self.pos, t.pos))])
return attack_directions
def __lt__(self, other):
self_row, self_col = self.pos
other_row, other_col = other.pos
if self_row == other_row:
return self_col < other_col
return self_row < other_row
def try_attack(self, enemies):
# Can this unit attack this turn?
attack_directions = self.get_attack_options(enemies)
if not attack_directions:
return False
fewest_target_hit_points = 1E16
target_to_attack = None
for direction in ('N', 'W', 'E', 'S'):
if direction in attack_directions:
p = self.pos[0] + delta[direction][0], self.pos[1] + delta[direction][1]
                target_unit = [enemy for enemy in enemies if enemy.alive and enemy.pos == p][0]
if target_unit.hit_points < fewest_target_hit_points:
fewest_target_hit_points = target_unit.hit_points
target_to_attack = target_unit
target_to_attack.hit_points -= self.attack_power
if target_to_attack.hit_points <= 0:
target_to_attack.alive = False
return True
def try_move(self, turn, allies, enemies, board, graph):
# squares in range of the enemy
squares = get_squares_in_range(allies, enemies, graph)
# print(squares)
# cost_map
cost_map = get_cost_map(self.pos, allies, enemies, graph)
# print(cost_map)
# print_cost_map(turn, board, enemies, allies, cost_map)
        square_costs = {sq: cost_map[sq] for sq in squares if sq in cost_map}
        if not square_costs:
            # No reachable target square found, do not move
            return False
        min_cost = min(square_costs.values())
        min_cost_squares = [sq for sq in square_costs if square_costs[sq] == min_cost]
target_square = sorted(min_cost_squares, key=itemgetter(0, 1))[0]
# now build a new cost_map from the targeted squares perspective
# choose the square adjacent to this unit with the lowest cost for the first move
# if multiple squares have the lowest cost, choose the first in reading order
rev_cost_map = get_cost_map(target_square, allies, enemies, graph)
first_step = None
for direction in 'N', 'W', 'E', 'S':
sq = self.pos[0] + delta[direction][0], self.pos[1] + delta[direction][1]
if sq not in graph or sq not in rev_cost_map:
continue
if first_step is None or rev_cost_map[sq] < rev_cost_map[first_step]:
first_step = sq
if first_step is not None:
self.pos = first_step
return True
return False
def take_turn(self, turn, allies, enemies, board, graph):
# I can't attack if I'm dead
if not self.alive:
return
attack_performed = self.try_attack(enemies)
if attack_performed:
# Turn complete
return
# No attack performed, do a move
move_performed = self.try_move(turn, allies, enemies, board, graph)
if not move_performed:
# Turn complete
return
self.try_attack(enemies)
class Elf(Unit):
def __init__(self, pos, attack_power=3):
super(Elf, self).__init__(pos, attack_power)
class Goblin(Unit):
def __init__(self, pos, attack_power=3):
super(Goblin, self).__init__(pos, attack_power)
def parse_initial_state(initial_state, elf_attack_power=3):
rows = len(initial_state)
cols = len(initial_state[0])
board = np.zeros((rows, cols), dtype=int)
elves = []
goblins = []
for i in range(rows):
for j in range(cols):
if initial_state[i][j] == '#':
board[i, j] = 1
elif initial_state[i][j] == 'E':
elves.append(Elf((i, j), attack_power=elf_attack_power))
elif initial_state[i][j] == 'G':
goblins.append(Goblin((i, j)))
return elves, goblins, board
def solve(initial_state):
elves, goblins, board = parse_initial_state(initial_state)
graph = build_graph(board)
units = elves + goblins
units.sort()
print('\ninitial setup')
print_board(0, board, elves, goblins)
for unit in units:
print(unit)
for turn in range(1,1000):
print('\nstart turn', turn)
# Sort all units by read-order
units = elves + goblins
units.sort()
for unit in units:
if isinstance(unit, Elf):
allies = elves
enemies = goblins
else:
allies = goblins
enemies = elves
if unit is goblins[0] and turn == 2:
print(unit.pos)
unit.take_turn(turn, allies, enemies, board, graph)
# number of enemies remaining
num_enemies = len([e for e in enemies if e.alive])
if num_enemies == 0:
print('done after', turn-1, 'full turns')
hp_remaining = sum([unit.hit_points for unit in units if unit.alive])
print('hitpoints remaining', hp_remaining)
print('result', (turn-1) * hp_remaining)
exit(0)
# clear the dead
elves = [e for e in elves if e.alive]
goblins = [g for g in goblins if g.alive]
print_board(turn, board, elves, goblins)
for unit in units:
print(unit)
print('end turn', turn)
print()
print()
if __name__ == '__main__':
# with open('test_input.txt', 'r') as f:
# lines = [s.rstrip() for s in f.readlines()]
# solve(initial_state=lines)
with open('input.txt', 'r') as f:
lines = [s.rstrip() for s in f.readlines()]
solve(initial_state=lines)
|
import binascii
from helper import dump2file
import Crypto
import Crypto.Random
from Crypto.Hash import SHA
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
def hex2bin(hexStr):
return binascii.unhexlify(hexStr)
def verfiy_candidate_signature(public_address, signature, name):
"""
    Check that the provided signature over `name` was produced by the
    private key matching `public_address` (a hex-encoded DER public key)
"""
try:
public_key = RSA.importKey(hex2bin(public_address))
verifier = PKCS1_v1_5.new(public_key)
h = SHA.new(str(name).encode('utf8'))
return verifier.verify(h, hex2bin(signature))
    except Exception:
        # Any malformed key/signature or failed verification counts as invalid
        return False
def gen_id():
random_gen = Crypto.Random.new().read
private_key = RSA.generate(1024, random_gen)
public_key = private_key.publickey()
pri_k_exp = private_key.exportKey(format='DER').hex()
pub_k_exp = public_key.exportKey(format='DER').hex()
dump2file(pri_k_exp, "Client_pri.der")
dump2file(pub_k_exp, "Client_pub.der")
response = {
'private_key': pri_k_exp,
'public_key': pub_k_exp
}
return response
def sign_transaction(trans_dict, with_k):
"""
Sign transaction with private key
"""
key = RSA.importKey(hex2bin(with_k))
cipher = PKCS1_v1_5.new(key)
h = SHA.new(str(trans_dict).encode('utf8'))
return cipher.sign(h).hex()
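# --- Illustrative round-trip sketch (not part of the original module) ---
# A minimal sanity check of the helpers above, assuming pycrypto/pycryptodome is
# installed as imported at the top of this file. The underscored names below are
# throwaway demo variables; a real client would use gen_id(), which additionally
# writes Client_pri.der / Client_pub.der via dump2file().
if __name__ == "__main__":
    _rng = Crypto.Random.new().read
    _key = RSA.generate(1024, _rng)
    _pri_hex = _key.exportKey(format='DER').hex()
    _pub_hex = _key.publickey().exportKey(format='DER').hex()
    _payload = {'to': 'alice', 'amount': 1}
    _sig = sign_transaction(_payload, _pri_hex)
    # Both helpers hash str(...) of their input, so the same dict verifies correctly.
    print(verfiy_candidate_signature(_pub_hex, _sig, _payload))  # expected: True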
|
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license
"""
ASN.1 (Abstract Syntax Notation One)
"""
import random
from scapy.config import conf
from scapy.error import Scapy_Exception,warning
from scapy.volatile import RandField, RandIP
from scapy.utils import Enum_metaclass, EnumElement
class RandASN1Object(RandField):
def __init__(self, objlist=None):
if objlist is None:
objlist = map(lambda x:x._asn1_obj,
filter(lambda x:hasattr(x,"_asn1_obj"), ASN1_Class_UNIVERSAL.__rdict__.values()))
self.objlist = objlist
self.chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
def _fix(self, n=0):
o = random.choice(self.objlist)
if issubclass(o, ASN1_INTEGER):
return o(int(random.gauss(0,1000)))
elif issubclass(o, ASN1_IPADDRESS):
z = RandIP()._fix()
return o(z)
elif issubclass(o, ASN1_STRING):
z = int(random.expovariate(0.05)+1)
return o("".join([random.choice(self.chars) for i in range(z)]))
elif issubclass(o, ASN1_SEQUENCE) and (n < 10):
z = int(random.expovariate(0.08)+1)
return o(map(lambda x:x._fix(n+1), [self.__class__(objlist=self.objlist)]*z))
return ASN1_INTEGER(int(random.gauss(0,1000)))
##############
#### ASN1 ####
##############
class ASN1_Error(Scapy_Exception):
pass
class ASN1_Encoding_Error(ASN1_Error):
pass
class ASN1_Decoding_Error(ASN1_Error):
pass
class ASN1_BadTag_Decoding_Error(ASN1_Decoding_Error):
pass
class ASN1Codec(EnumElement):
def register_stem(cls, stem):
cls._stem = stem
def dec(cls, s, context=None):
return cls._stem.dec(s, context=context)
def safedec(cls, s, context=None):
return cls._stem.safedec(s, context=context)
def get_stem(cls):
        return cls._stem
class ASN1_Codecs_metaclass(Enum_metaclass):
element_class = ASN1Codec
class ASN1_Codecs:
__metaclass__ = ASN1_Codecs_metaclass
BER = 1
DER = 2
PER = 3
CER = 4
LWER = 5
BACnet = 6
OER = 7
SER = 8
XER = 9
class ASN1Tag(EnumElement):
def __init__(self, key, value, context=None, codec=None):
EnumElement.__init__(self, key, value)
self._context = context
if codec == None:
codec = {}
self._codec = codec
def clone(self): # /!\ not a real deep copy. self.codec is shared
return self.__class__(self._key, self._value, self._context, self._codec)
def register_asn1_object(self, asn1obj):
self._asn1_obj = asn1obj
def asn1_object(self, val):
if hasattr(self,"_asn1_obj"):
return self._asn1_obj(val)
raise ASN1_Error("%r does not have any assigned ASN1 object" % self)
def register(self, codecnum, codec):
self._codec[codecnum] = codec
def get_codec(self, codec):
try:
c = self._codec[codec]
except KeyError,msg:
raise ASN1_Error("Codec %r not found for tag %r" % (codec, self))
return c
class ASN1_Class_metaclass(Enum_metaclass):
element_class = ASN1Tag
def __new__(cls, name, bases, dct): # XXX factorise a bit with Enum_metaclass.__new__()
for b in bases:
for k,v in b.__dict__.iteritems():
if k not in dct and isinstance(v,ASN1Tag):
dct[k] = v.clone()
rdict = {}
for k,v in dct.iteritems():
if type(v) is int:
v = ASN1Tag(k,v)
dct[k] = v
rdict[v] = v
elif isinstance(v, ASN1Tag):
rdict[v] = v
dct["__rdict__"] = rdict
cls = type.__new__(cls, name, bases, dct)
for v in cls.__dict__.values():
if isinstance(v, ASN1Tag):
v.context = cls # overwrite ASN1Tag contexts, even cloned ones
return cls
class ASN1_Class:
__metaclass__ = ASN1_Class_metaclass
class ASN1_Class_UNIVERSAL(ASN1_Class):
name = "UNIVERSAL"
ERROR = -3
RAW = -2
NONE = -1
ANY = 0
BOOLEAN = 1
INTEGER = 2
BIT_STRING = 3
STRING = 4
NULL = 5
OID = 6
OBJECT_DESCRIPTOR = 7
EXTERNAL = 8
REAL = 9
ENUMERATED = 10
EMBEDDED_PDF = 11
UTF8_STRING = 12
RELATIVE_OID = 13
    SEQUENCE = 0x30  # XXX 16 ??
    SET = 0x31  # XXX 17 ??
NUMERIC_STRING = 18
PRINTABLE_STRING = 19
T61_STRING = 20
VIDEOTEX_STRING = 21
IA5_STRING = 22
UTC_TIME = 23
GENERALIZED_TIME = 24
GRAPHIC_STRING = 25
ISO646_STRING = 26
GENERAL_STRING = 27
UNIVERSAL_STRING = 28
CHAR_STRING = 29
BMP_STRING = 30
IPADDRESS = 0x40
COUNTER32 = 0x41
GAUGE32 = 0x42
TIME_TICKS = 0x43
COUNTER64 = 0x46
SEP = 0x80
class ASN1_Object_metaclass(type):
def __new__(cls, name, bases, dct):
c = super(ASN1_Object_metaclass, cls).__new__(cls, name, bases, dct)
try:
c.tag.register_asn1_object(c)
except:
            warning("Error registering %r for %r" % (c.tag, c))
return c
class ASN1_Object:
__metaclass__ = ASN1_Object_metaclass
tag = ASN1_Class_UNIVERSAL.ANY
def __init__(self, val):
self.val = val
def enc(self, codec):
return self.tag.get_codec(codec).enc(self.val)
def __repr__(self):
return "<%s[%r]>" % (self.__dict__.get("name", self.__class__.__name__), self.val)
def __str__(self):
return self.enc(conf.ASN1_default_codec)
def strshow(self, lvl=0):
return (" "*lvl)+repr(self)+"\n"
def show(self, lvl=0):
print self.strshow(lvl)
def __eq__(self, other):
return self.val == other
def __cmp__(self, other):
return cmp(self.val, other)
class ASN1_DECODING_ERROR(ASN1_Object):
tag = ASN1_Class_UNIVERSAL.ERROR
def __init__(self, val, exc=None):
ASN1_Object.__init__(self, val)
self.exc = exc
def __repr__(self):
return "<%s[%r]{{%s}}>" % (self.__dict__.get("name", self.__class__.__name__),
self.val, self.exc.args[0])
def enc(self, codec):
if isinstance(self.val, ASN1_Object):
return self.val.enc(codec)
return self.val
class ASN1_force(ASN1_Object):
tag = ASN1_Class_UNIVERSAL.RAW
def enc(self, codec):
if isinstance(self.val, ASN1_Object):
return self.val.enc(codec)
return self.val
class ASN1_BADTAG(ASN1_force):
pass
class ASN1_INTEGER(ASN1_Object):
tag = ASN1_Class_UNIVERSAL.INTEGER
class ASN1_STRING(ASN1_Object):
tag = ASN1_Class_UNIVERSAL.STRING
class ASN1_BIT_STRING(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.BIT_STRING
class ASN1_PRINTABLE_STRING(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
class ASN1_T61_STRING(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.T61_STRING
class ASN1_IA5_STRING(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.IA5_STRING
class ASN1_NUMERIC_STRING(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING
class ASN1_VIDEOTEX_STRING(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING
class ASN1_IPADDRESS(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.IPADDRESS
class ASN1_UTC_TIME(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.UTC_TIME
class ASN1_GENERALIZED_TIME(ASN1_STRING):
tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
class ASN1_TIME_TICKS(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.TIME_TICKS
class ASN1_BOOLEAN(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.BOOLEAN
class ASN1_ENUMERATED(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.ENUMERATED
class ASN1_NULL(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.NULL
class ASN1_SEP(ASN1_NULL):
tag = ASN1_Class_UNIVERSAL.SEP
class ASN1_GAUGE32(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.GAUGE32
class ASN1_COUNTER32(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.COUNTER32
class ASN1_COUNTER64(ASN1_INTEGER):
tag = ASN1_Class_UNIVERSAL.COUNTER64
class ASN1_SEQUENCE(ASN1_Object):
tag = ASN1_Class_UNIVERSAL.SEQUENCE
def strshow(self, lvl=0):
s = (" "*lvl)+("# %s:" % self.__class__.__name__)+"\n"
for o in self.val:
s += o.strshow(lvl=lvl+1)
return s
class ASN1_SET(ASN1_SEQUENCE):
tag = ASN1_Class_UNIVERSAL.SET
class ASN1_OID(ASN1_Object):
tag = ASN1_Class_UNIVERSAL.OID
def __init__(self, val):
val = conf.mib._oid(val)
ASN1_Object.__init__(self, val)
def __repr__(self):
return "<%s[%r]>" % (self.__dict__.get("name", self.__class__.__name__), conf.mib._oidname(self.val))
def __oidname__(self):
return '%s'%conf.mib._oidname(self.val)
conf.ASN1_default_codec = ASN1_Codecs.BER
|
import pymysql
# from fj_ftx import settings
#
# MYSQL_HOSTS = settings.MYSQL_HOSTS
# MYSQL_USER = settings.MYSQL_USER
# MYSQL_PASSWORD = settings.MYSQL_PASSWORD
# MYSQL_PORT = settings.MYSQL_PORT
# MYSQL_DB = settings.MYSQL_DB
MYSQL_HOSTS = 'cdb-4sj903z8.bj.tencentcdb.com'
MYSQL_USER = 'root'
MYSQL_PASSWORD = 'andylau1987212'
MYSQL_PORT = 10012
MYSQL_DB = 'spiders'
conn = pymysql.connect(host=MYSQL_HOSTS, user=MYSQL_USER, passwd=MYSQL_PASSWORD,
db=MYSQL_DB, port=MYSQL_PORT, charset='utf8')
cursor = conn.cursor()
class Sql:
@classmethod
def insert_ftx(cls, date, province, city, name, reference_rate,
investment_period,
recommend, general, not_recommended, favorable_rate,
followers, Turnover, investors, borrowers, update_time,
Unpaid, per_capita_investment, per_capita_borrowing,
borrowing_number, uncollected_money, unpaid_people, score,
rating, rating_ranking):
sql = "INSERT INTO wdzj (date, province, city, name,reference_rate, " \
"investment_period,recommend, general,not_recommended, " \
"favorable_rate,followers, Turnover,investors, borrowers, " \
"update_time,Unpaid,per_capita_investment, " \
"per_capita_borrowing,borrowing_number, uncollected_money, " \
"unpaid_people,score,rating, rating_ranking) VALUES(%(date)s," \
"%(province)s, %(city)s, %(name)s, %(reference_rate)s, " \
"%(investment_period)s, %(recommend)s, %(general)s," \
"%(not_recommended)s, %(favorable_rate)s, %(followers)s," \
"%(Turnover)s, %(investors)s, %(borrowers)s, %(update_time)s," \
"%(Unpaid)s, %(per_capita_investment)s," \
"%(per_capita_borrowing)s, %(borrowing_number)s," \
"%(uncollected_money)s, %(unpaid_people)s, %(score)s," \
"%(rating)s, %(rating_ranking)s)"
value = {
'date': date,
'province': province,
'city': city,
'name': name,
'reference_rate': reference_rate,
'investment_period': investment_period,
'recommend': recommend,
'general': general,
'not_recommended': not_recommended,
'favorable_rate': favorable_rate,
'followers': followers,
'Turnover': Turnover,
'investors': investors,
'borrowers': borrowers,
'update_time': update_time,
'Unpaid': Unpaid,
'per_capita_investment': per_capita_investment,
'per_capita_borrowing': per_capita_borrowing,
'borrowing_number': borrowing_number,
'uncollected_money': uncollected_money,
'unpaid_people': unpaid_people,
'score': score,
'rating': rating,
'rating_ranking': rating_ranking
}
cursor.execute(sql, value)
conn.commit()
@classmethod
def select_name(cls, name):
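        """
        Return the first result row of the EXISTS query: a 1-tuple whose single
        element is 1 if a wdzj record with this name exists, 0 otherwise.
        """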
sql = "SELECT EXISTS(SELECT 1 FROM wdzj WHERE name=%(name)s)"
value = {
'name': name
}
cursor.execute(sql, value)
return cursor.fetchall()[0]
|
import csv
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import time
URL = 'https://columbian.gwu.edu/2015-2016'
response = requests.get(URL)
html = response.content
soup = BeautifulSoup(html, 'lxml')
all_pages = soup.select('.menu-mlid-1117 > ul > li > a')
all_pages_rows = []
all_pages_headers = []
def parseTable(table, id):
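    """
    Parse an HTML table into (header_list, list_of_rows). Header cells come from <th> tags,
    links in data cells are resolved to absolute URLs, every non-empty row gets the page id
    appended as its last column, and the final <tr> is skipped.
    """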
list_of_rows = []
header_list = []
for row in table.findAll('tr')[0:-1]:
list_of_cells = []
for header in row.findAll('th'):
header_list.append(header.text)
for cell in row.findAll('td'):
link = cell.find('a')
if link:
list_of_cells.append(urljoin(URL, link['href']))
else: list_of_cells.append(cell.text)
if list_of_cells:
list_of_cells.append(id)
list_of_rows.append(list_of_cells)
return header_list, list_of_rows
for i, page in enumerate(all_pages):
print('Fetching %s' % page['href'])
current_year = requests.get(urljoin(URL, page['href']))
current_year_soup = BeautifulSoup(current_year.content, 'lxml')
table = current_year_soup.find('table')
header_list, all_rows = parseTable(table, page['href'][1:])
if i == 0:
all_pages_headers.extend(header_list)
all_pages_rows.extend(all_rows)
time.sleep(1) #Chill out for a second before hitting the page again.
all_pages_headers.append('Year')
all_pages_rows.insert(0, list(all_pages_headers))
with open("gwu_grants.csv", "w", newline="") as outfile:
    writer = csv.writer(outfile, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerows(all_pages_rows)
|
import pytest
from rest_framework.test import APIRequestFactory
from wagtail.api.v2.utils import BadRequestError
from wagtail.core.models import Page
from django.conf import settings
from src.wagtail_rest_pack.comments.create import CreateCommentAPIView
from .factory import new_comment_data
from .help import cleanup_and_prepare
factory = APIRequestFactory()
endpoint = CreateCommentAPIView()
@pytest.fixture(autouse=True)
def run_around_tests():
cleanup_and_prepare()
yield
pass
@pytest.mark.django_db
def test_comment_can_be_added_with_anonymous_user():
# given
page_id = str(list(Page.objects.all())[0].id)
request = factory.post('/api/v2/comments/' + page_id, new_comment_data(page_id))
# when
response = endpoint.dispatch(request)
# then
assert response.status_code == 201
@pytest.mark.django_db
def test_comment_can_not_be_added_without_recaptcha():
# given
page_id = str(list(Page.objects.all())[0].id)
request = factory.post('/api/v2/comments/', new_comment_data(page_id))
# when
response = endpoint.dispatch(request)
# then
assert response.status_code == 400
assert 'recaptcha' in response.data
@pytest.mark.django_db
def test_comment_can_not_be_added_when_recaptcha_verification_fails():
# given
page_id = str(list(Page.objects.all())[0].id)
request = factory.post('/api/v2/comments/', new_comment_data(page_id))
settings.RECAPTCHA_VERIFIER = 'wagtail_rest_pack.recaptcha.google.GoogleRecaptchaVerifier'
# when
response = endpoint.dispatch(request)
# then
assert response.status_code == 403
@pytest.mark.django_db
def test_comment_can_not_be_added_when_model_not_allowed():
# given
page_id = str(list(Page.objects.all())[0].id)
request = factory.post('/api/v2/comments/', new_comment_data(page_id))
settings.ALLOWED_COMMENTED_CONTENT_TYPES = ['wagtailcore.Page2']
    # when / then
    with pytest.raises(BadRequestError) as excinfo:
        endpoint.dispatch(request)
    assert 'Given content is not allowed to be commented' in excinfo.value.args[0]
|
import operator
from datetime import timedelta
from functools import reduce
import re
from itertools import chain
from typing import Union, List
import parsy
from parsy import string, Parser
from core.parse_util import lexeme, float_p, integer_p
# See https://en.wikipedia.org/wiki/Unit_of_time for reference
# The order is relevant: from highest to lowest and longest to shortest
# | output | all names | number of seconds |
time_units = [
("yr", ["years", "year", "yr", "y"], 365 * 24 * 3600),
("mo", ["month", "mo", "M"], 31 * 24 * 3600),
("d", ["days", "day", "d"], 24 * 3600),
(None, ["weeks", "week", "w"], 7 * 24 * 3600), # output is none, so it will not be used to print
("h", ["hours", "hour", "h"], 3600),
("min", ["minutes", "minute", "min", "m"], 60),
("s", ["seconds", "second", "s"], 1),
]
time_unit_combines = [",", "and"]
# Regex to check whether a string is a valid duration expression
DurationRe = re.compile(
"^[+-]?([\\d.]+("
+ "|".join(chain.from_iterable(names for unit, names, _ in time_units))
+ ")\\s*("
+ "|".join(time_unit_combines)
+ ")?\\s*)+$"
)
def combine_durations(elems: List[Union[int, float]]) -> Union[int, float]:
result = 0.0
for d in elems:
result += abs(d)
return result if elems[0] >= 0 else -result
time_unit_parser = reduce(
lambda x, y: x | y, [lexeme(string(name)).result(seconds) for _, names, seconds in time_units for name in names]
)
time_unit_combination: Parser = reduce(lambda x, y: x | y, [lexeme(string(a)) for a in time_unit_combines])
single_duration_parser = parsy.seq((float_p | integer_p), time_unit_parser).combine(operator.mul)
duration_parser = single_duration_parser.sep_by(time_unit_combination.optional(), min=1).map(combine_durations)
def parse_duration(ds: str) -> timedelta:
return timedelta(seconds=duration_parser.parse(ds))
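# --- Illustrative usage sketch (not part of the original module) ---
# Assumes core.parse_util provides the lexeme/float_p/integer_p combinators imported
# above; exact whitespace handling between tokens depends on how lexeme is defined.
if __name__ == "__main__":
    print(parse_duration("3d"))        # expected: 3 days, 0:00:00
    print(parse_duration("2h,30min"))  # expected: 2:30:00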
|
# Reference: https://note.com/agw/n/nc052420f3c37
from sense_hat import SenseHat
sense = SenseHat()
sense.clear()
r = [255,0,0]
b = [0,0,255]
i = [75,0,130]
v = [159,0,255]
e = [0,0,0]
w = [255,255,255]
imageOff = [
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e,
e,e,e,e,e,e,e,e
]
imageUp = [
e,e,e,w,e,e,e,e,
e,e,e,w,e,e,e,e,
e,e,e,w,e,e,e,e,
e,e,e,w,w,w,w,e,
e,e,e,w,e,e,e,e,
e,e,e,w,e,e,e,e,
e,e,e,w,e,e,e,e,
w,w,w,w,w,w,w,w
]
imageDown = [
r,r,r,r,r,r,r,r,
e,e,e,r,e,e,e,e,
e,e,e,r,e,e,e,e,
e,e,e,r,r,r,r,e,
e,e,e,r,e,e,e,e,
e,e,e,r,e,e,e,e,
e,e,e,r,e,e,e,e,
e,e,e,r,e,e,e,e
]
imageRight = [
e,e,e,e,i,e,e,e,
e,e,e,i,e,e,e,e,
i,i,i,i,i,i,i,i,
e,e,i,e,e,e,e,e,
e,i,e,i,i,i,i,i,
i,e,e,i,e,e,e,i,
e,e,e,i,e,e,e,i,
e,e,e,i,i,i,i,i
]
imageLeft = [
e,e,e,e,v,e,e,e,
e,e,e,v,e,e,e,e,
v,v,v,v,v,v,v,v,
e,e,v,e,e,e,e,e,
e,v,e,v,v,v,v,v,
v,e,e,e,e,v,e,e,
e,e,e,e,e,v,e,e,
e,e,e,v,v,v,v,v
]
imageMiddle = [
e,b,e,b,b,b,b,b,
b,b,b,b,e,b,e,b,
e,b,e,b,b,b,b,b,
e,b,e,b,e,b,e,b,
e,b,e,b,b,b,b,b,
e,b,b,e,e,b,e,e,
b,b,e,e,e,b,e,e,
e,b,e,e,e,b,e,e
]
chFlg = 0
image = imageOff
while chFlg >= 0:
for event in sense.stick.get_events():
print(event.direction, event.action)
if event.direction == "down":
image = imageDown
chFlg = 1
if event.direction == "up":
image = imageUp
chFlg = 1
if event.direction == "right":
image = imageRight
chFlg = 1
if event.direction == "left":
image = imageLeft
chFlg = 1
if event.direction == "middle":
chFlg = -1
if chFlg != 0:
sense.clear()
if chFlg == 1:
sense.set_pixels(image)
chFlg = 0
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
class UnvalidEncodingAESKey(Exception):
pass
class AppIdValidationError(Exception):
pass
class InvalidSignature(Exception):
pass
|
#!/usr/bin/python
import sys
import getopt
import jieba
jieba.dt.cache_file = 'jieba.cache.new'
input_path = 'input.txt'
output_path = 'output.txt'
def scut2word(input_path, output_path):
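    """
    Read input_path line by line, segment each line with jieba, and write the
    space-joined tokens to output_path.
    """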
input_fd = open(input_path, "r")
output_fd = open(output_path, "w")
for line in input_fd:
        print("[Info] Original Line:", line, end='')
seg_list = jieba.cut(line)
after_cut_line = " ".join(seg_list)
        print("[Info] Finished Cut Line from original:", after_cut_line, end='')
output_fd.write(after_cut_line)
input_fd.close()
output_fd.close()
print(" ========== [System Message]: Finished Cut!!! Output to", output_path," ========== ")
def main(argv):
inputfile = False
outputfile = False
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print('Tool_JiebaCut_PreProcess.py -i <inputfile> -o <outputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('test.py -i <inputfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
if not inputfile or not outputfile:
print('Tool_JiebaCut_PreProcess.py -i <inputfile> -o <outputfile>')
sys.exit(2)
    print(" ========== [System Message]: Preprocess file Word cut for input file", inputfile, " ========== ")
scut2word(inputfile, outputfile)
if __name__ == "__main__":
main(sys.argv[1:])
|
from datanodes.core.utils import dumpException
from datanodes.graphics.graphics_edge import GraphicsEdge
DEBUG = False
class SceneHistory():
def __init__(self, scene):
self.scene = scene
self.history_limit = 32
        self._history_modified_listeners = []
self.clear()
def clear(self):
self.history_stack = []
self.history_current_step = -1
def storeInitialHistoryStamp(self):
self.storeHistory("Initial History Stamp")
def canUndo(self):
return self.history_current_step > 0
def canRedo(self):
return self.history_current_step + 1 < len(self.history_stack)
def undo(self):
if DEBUG : print("UNDO")
if self.canUndo():
self.history_current_step -= 1
self.restoreHistory()
self.scene.has_been_modified = True
def redo(self):
if DEBUG : print("REDO")
if self.canRedo():
self.history_current_step += 1
self.restoreHistory()
self.scene.has_been_modified = True
def addHistoryModifiedListener(self, callback):
        self._history_modified_listeners.append(callback)
def restoreHistory(self):
if DEBUG : print("Restoring the history ... current step: @%d" % self.history_current_step,
"(%d)" % len(self.history_stack))
self.restoreHistoryStamp(self.history_stack[self.history_current_step])
        for callback in self._history_modified_listeners : callback()
def storeHistory(self, desc, setModified=True):
if DEBUG : print("Storing the history", "{0}".format(desc) ,
" ... current step: %d" % self.history_current_step,
"(%d)" % len(self.history_stack))
# if the current step is not at the end of the history stack
if self.history_current_step +1 < len(self.history_stack):
self.history_stack = self.history_stack[:self.history_current_step+1]
# check the history stack size limit
if self.history_current_step + 1 >= self.history_limit:
self.history_stack = self.history_stack[1:]
self.history_current_step -= 1
hs = self.createHistoryStamp(desc)
self.history_stack.append(hs)
self.history_current_step += 1
if setModified:
            self.scene.has_been_modified = True
if DEBUG : print(" --- setting step to:", self.history_current_step)
        for callback in self._history_modified_listeners : callback()
if DEBUG : print("SHS: ", "done running the callbacks")
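    # Illustrative walk-through of the stack bookkeeping above: with history_limit = 3 and
    # stamps A, B, C stored, history_stack == [A, B, C] and history_current_step == 2.
    # undo() moves the step to 1 and restores B; redo() moves it back to 2 and restores C.
    # Storing a new stamp D after an undo truncates the redo branch first ([A, B, D]),
    # and once the limit is reached the oldest stamp is dropped from the front.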
def restoreHistoryStamp(self, history_stamp):
if DEBUG : print("RHS: ", history_stamp)
try:
self.scene.deserialize(history_stamp['snapshot'])
# restore selection
for edge_id in history_stamp['selection']['edges']:
for edge in self.scene.edges:
if edge.id == edge_id:
edge.grEdge.setSelected(True)
break
for node_id in history_stamp['selection']['nodes']:
for node in self.scene.nodes:
if node.id == node_id:
node.grNode.setSelected(True)
break
except Exception as e : dumpException(e)
def createHistoryStamp(self, desc):
if DEBUG : print("SHS: ", desc)
self_obj = {
"nodes" : [],
"edges" : []
}
for item in self.scene.grScene.selectedItems():
if hasattr(item, "node"):
self_obj['nodes'].append(item.node.id)
elif isinstance(item, GraphicsEdge):
self_obj['edges'].append(item.edge.id)
history_stamp = {
"desc" : desc,
"snapshot" : self.scene.serialize(),
"selection" : self_obj
}
return history_stamp
|
import uuid
from datetime import datetime
from django.db import models
from apps.user.models import User
# Create your models here.
class Category_Article(models.Model):
"""
分类
"""
name = models.CharField(max_length=100)
order = models.IntegerField(default=0,verbose_name='排序')
add_time = models.DateTimeField(default=datetime.now)
class Meta:
verbose_name = '分类'
verbose_name_plural = verbose_name
class Article(models.Model):
"""文章"""
id = models.UUIDField(default=uuid.uuid4,primary_key=True)
authors = models.ForeignKey(User,on_delete=models.CASCADE,verbose_name='用户')
category = models.ForeignKey(Category_Article,on_delete=models.CASCADE,verbose_name='分类')
title = models.CharField(max_length=100)
keywords = models.CharField(max_length=200,blank=True,null=True)
desc = models.CharField(max_length=256,blank=True,null=True)
list_pic = models.ImageField(upload_to='article/%Y%m%d',blank=True,null=True)
content = models.TextField()
click_nums = models.IntegerField(default=0,verbose_name='阅读数量')
is_show = models.BooleanField(default=True,verbose_name='是否删除')
add_time = models.DateTimeField(auto_now_add=True)
def get_number(self):
n= self.article_comment_set.all()
num = self.article_comment_set.count()
for i in n:
num+=i.articlecommentreply_set.count()
return num
def __str__(self):
return self.title
class Meta:
verbose_name = '文章'
verbose_name_plural = verbose_name
ordering = ('-add_time',)
class Recommend(models.Model):
recommends = models.ForeignKey(Article,on_delete=models.CASCADE,null=True,)
is_recommend = models.BooleanField(default=False, verbose_name='是否推荐')
add_time = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = '文章推荐'
verbose_name_plural = verbose_name
ordering = ('-add_time',)
class Article_Comment(models.Model):
""""评论"""
user = models.ForeignKey(User,on_delete=models.CASCADE,verbose_name='用户')
article =models.ForeignKey(Article,verbose_name='文章',on_delete=models.CASCADE)
comments = models.TextField(verbose_name='评论')
address = models.CharField(max_length=50,verbose_name='地址',blank=True,null=True)
url = models.CharField(max_length=60, blank=True, null=True, default='')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
def __str__(self):
return self.article.title
class Meta:
verbose_name ='文章评论'
verbose_name_plural=verbose_name
ordering = ('-add_time',)
class ArticleCommentReply(models.Model):
"""评论回复"""
user = models.ForeignKey(User,on_delete=models.CASCADE,verbose_name='当前用户',related_name='form_uid')
to_uids = models.ForeignKey(User,on_delete=models.CASCADE,verbose_name='目标用户',related_name='to_uids',default='')
comments = models.TextField(verbose_name='回复内容')
url = models.CharField(max_length=60,blank=True,null=True,default='')
aomments_id = models.ForeignKey(Article_Comment,on_delete=models.CASCADE,verbose_name='回复id')
address = models.CharField(max_length=50, verbose_name='地址',blank=True,null=True)
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
class Headlines(models.Model):
"""头条"""
id = models.UUIDField(default=uuid.uuid4,primary_key=True)
title = models.CharField(max_length=200,verbose_name='标题')
category = models.CharField(max_length=20,verbose_name='分类')
conent = models.TextField(verbose_name='内容',default='')
author_name = models.CharField(max_length=100,verbose_name='来源')
url = models.URLField(verbose_name='地址')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
class Meta:
ordering = ('-add_time',)
|
"""Example program that shows how to attach meta-data to a stream, and how to
later on retrieve the meta-data again at the receiver side."""
import time
from pylsl import StreamInfo, StreamOutlet, StreamInlet, resolve_stream
# create a new StreamInfo object which shall describe our stream
info = StreamInfo("MetaTester", "EEG", 8, 100, "float32", "myuid56872")
# now attach some meta-data (in accordance with XDF format,
# see also code.google.com/p/xdf)
chns = info.desc().append_child("channels")
for label in ["C3", "C4", "Cz", "FPz", "POz", "CPz", "O1", "O2"]:
ch = chns.append_child("channel")
ch.append_child_value("label", label)
ch.append_child_value("unit", "microvolts")
ch.append_child_value("type", "EEG")
info.desc().append_child_value("manufacturer", "SCCN")
cap = info.desc().append_child("cap")
cap.append_child_value("name", "EasyCap")
cap.append_child_value("size", "54")
cap.append_child_value("labelscheme", "10-20")
# create outlet for the stream
outlet = StreamOutlet(info)
# (...normally here one might start sending data into the outlet...)
# === the following could run on another computer ===
# first we resolve a stream whose name is MetaTester (note that there are
# other ways to query a stream, too - for instance by content-type)
results = resolve_stream("name", "MetaTester")
# open an inlet so we can read the stream's data (and meta-data)
inlet = StreamInlet(results[0])
# get the full stream info (including custom meta-data) and dissect it
info = inlet.info()
print("The stream's XML meta-data is: ")
print(info.as_xml())
print("The manufacturer is: %s" % info.desc().child_value("manufacturer"))
print("Cap circumference is: %s" % info.desc().child("cap").child_value("size"))
print("The channel labels are as follows:")
ch = info.desc().child("channels").child("channel")
for k in range(info.channel_count()):
print(" " + ch.child_value("label"))
ch = ch.next_sibling()
time.sleep(3)
|
from __future__ import (
absolute_import,
unicode_literals,
)
import six
from OpenSSL import crypto
from twisted.internet import (
endpoints,
reactor,
ssl,
task,
)
from twisted.web import (
resource,
server,
)
from pysoa.common.transport.http2_gateway.backend.base import BaseHTTP2BackendThread
class TwistedHTTP2BackendThread(BaseHTTP2BackendThread):
def __init__(self, *args, **kwargs):
super(TwistedHTTP2BackendThread, self).__init__(*args, **kwargs)
def run(self):
with open('cert.pem', 'r') as f:
cert_data = f.read()
with open('privkey.pem', 'r') as f:
key_data = f.read()
site = server.Site(PySOADelayedResource(
requests_queue=self.requests_queue,
responses_queue=self.responses_queue,
))
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_data)
options = ssl.CertificateOptions(
privateKey=key,
certificate=cert,
acceptableProtocols=[b'h2'],
)
endpoint = endpoints.SSL4ServerEndpoint(
reactor,
int(self.backend_layer_config['http_port']),
options,
interface=self.backend_layer_config['http_host']
)
endpoint.listen(site)
reactor.run(installSignalHandlers=0)
class PySOADelayedResource(resource.Resource):
isLeaf = True
def __init__(self, requests_queue, responses_queue):
self.requests_queue = requests_queue
self.responses_queue = responses_queue
def _delayed_response(self, request):
try:
protocol_key, stream_id, request_id, message, response_headers = self.responses_queue.get_nowait()
except six.moves.queue.Empty:
d = task.deferLater(reactor, 0, lambda: request)
d.addCallback(self._delayed_response)
return server.NOT_DONE_YET
request.write(message)
request.finish()
def render_POST(self, request):
try:
self.requests_queue.put((
None,
None,
request.content.read(),
), timeout=3)
except six.moves.queue.Full:
pass
d = task.deferLater(reactor, 0, lambda: request)
d.addCallback(self._delayed_response)
return server.NOT_DONE_YET
|
# -*- coding: utf-8 -*-
from django.db import models
class Task(models.Model):
pass
class TaskExecution(models.Model):
pass
|
#!/usr/bin/env python
import argparse
from common import Example, Fact, Rule, Theory, TheoryAssertionRepresentationWithLabel
import json
import problog
from problog.program import PrologString
from problog.core import ProbLog
from problog import get_evaluatable
from problog.formula import LogicFormula, LogicDAG
from problog.sdd_formula import SDD
from problog.engine import NonGroundProbabilisticClause, UnknownClause
from problog.engine_stack import NegativeCycle
import re
import time
from utils import parse_statement
current_milli_time = lambda: int(round(time.time() * 1000))
ruletaker_variable_nl_to_variable_format = {"someone": "X", "something": "Y"}
class Metrics:
"""Class to store accuracy and timing related metrics when running an entire theories dataset
through a theorem proving engine."""
def __init__(self):
self.num_examples = 0
self.num_true = 0
self.num_false = 0
self.num_correct_true = 0
self.num_correct_false = 0
self.num_correct = 0
self.total_elapsed_millisecs = 0
self.num_true_with_exception = 0
self.num_false_with_exception = 0
self.num_correct_true_with_exception = 0
self.num_correct_false_with_exception = 0
self.num_incorrect_true_no_exception = 0
self.num_incorrect_false_no_exception = 0
self.num_no_gold_label = 0
self.exception_num_failures = dict()
def update(self, gold_label, engine_label, engine_exception, elapsed_millisecs):
"""Update metrics. To be called after processing each example from the dataset."""
self.num_examples += 1
self.total_elapsed_millisecs += elapsed_millisecs
if gold_label is None:
self.num_no_gold_label += 1
else:
engine_label_correct = gold_label == engine_label
if not engine_label_correct:
exception_msg = "No Exception"
if engine_exception is not None:
exception_msg = engine_exception
if engine_exception not in self.exception_num_failures:
self.exception_num_failures[engine_exception] = 0
self.exception_num_failures[engine_exception] += 1
if gold_label:
self.num_true += 1
if engine_label:
self.num_correct_true += 1
if engine_exception is not None:
self.num_true_with_exception += 1
if engine_label:
self.num_correct_true_with_exception += 1
else:
if not engine_label:
self.num_incorrect_true_no_exception += 1
else:
self.num_false += 1
if not engine_label:
self.num_correct_false += 1
if engine_exception is not None:
self.num_false_with_exception += 1
if not engine_label:
self.num_correct_false_with_exception += 1
else:
if engine_label:
self.num_incorrect_false_no_exception += 1
self.num_correct = self.num_correct_true + self.num_correct_false
def report(self):
"""Report summarizing the overall accuracy, and breakdown by True and False (gold)
labels. Also reports the number of examples that result in exceptions from the
underlying engine, and timing information."""
if self.num_examples > 0:
avg_elapsed_secs = (self.total_elapsed_millisecs / self.num_examples) / 1000
print(f"Total no. of examples: {self.num_examples}")
if self.num_no_gold_label > 0:
print(f"Found {self.num_no_gold_label} examples without a gold label")
else:
total_no_of_exceptions = (
self.num_true_with_exception + self.num_false_with_exception
)
print(f" No. true: {self.num_true}")
print(f" No. correct: {self.num_correct_true}")
print(f" No. of exceptions: {self.num_true_with_exception}")
print(
f" No. correct with exceptions: {self.num_correct_true_with_exception}"
)
print(
f" No. incorrect without exception: {self.num_incorrect_true_no_exception}"
)
print(f" No. false: {self.num_false}")
print(f" No. correct: {self.num_correct_false}")
print(f" No. of exceptions: {self.num_false_with_exception}")
print(
f" No. correct with exceptions: {self.num_correct_false_with_exception}"
)
print(
f" No. incorrect without exception: {self.num_incorrect_false_no_exception}"
)
print(f"Total no. correct: {self.num_correct}")
print(f"Total no. with exceptions: {total_no_of_exceptions}")
print(f"Accuracy: {(self.num_correct * 100.0) / self.num_examples}")
if total_no_of_exceptions > 0:
print("\nFailure Breakdown by Exception:")
for exception in self.exception_num_failures:
print(
f" {exception}: {self.exception_num_failures[exception]}"
)
print(
f"\nAverage theorem proving time per example: {avg_elapsed_secs} secs\n\n"
)
def format_argument(arg_as_str):
"""Function that takes a string representing a predicate argument and formats it appropriately
    depending on whether it is a constant or a variable.
"""
arg_as_str = arg_as_str.lower()
if arg_as_str in ruletaker_variable_nl_to_variable_format.keys():
# If it's in the mapping, it is a variable, so return an appropriately formatted variable.
return ruletaker_variable_nl_to_variable_format[arg_as_str]
# If it's not in the mapping, it is a constant, so return a lower-cased string.
return arg_as_str
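# For example (illustrative): format_argument("someone") -> "X", format_argument("Something") -> "Y",
# and any other value is treated as a constant, e.g. format_argument("Bob") -> "bob".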
def parse_triple_representation(triple_rep):
"""Function that takes string containing a triple representation in RuleTaker format and creates
a Fact. E.g. input:
(\"cow\" \"needs\" \"bear\" \"+\")
"""
fact = None
triple_rep = triple_rep.strip()
# Remove enclosing parens ()
triple_txt = triple_rep[1:-1]
# Extract the parts of the triple by looking for quotes.
# Replace spaces in predicate/args with underscores to make them valid terms.
triple_parts = []
for m in re.finditer(r'"([^"]+)"', triple_txt):
triple_part = m.group(1).replace(" ", "_")
triple_parts.append(triple_part)
if len(triple_parts) == 4:
arg1 = format_argument(triple_parts[0])
predicate = triple_parts[1]
arg2 = format_argument(triple_parts[2])
polarity = triple_parts[3]
if predicate == "is":
predicate = f"{predicate}_{arg2}"
fact = Fact(polarity, predicate, [arg1])
else:
fact = Fact(polarity, predicate, [arg1, arg2])
return fact
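# For example (illustrative): ("cow" "needs" "bear" "+") becomes Fact("+", "needs", ["cow", "bear"]),
# while an "is" triple such as ("Bob" "is" "cold" "+") folds the attribute into the predicate,
# giving the unary Fact("+", "is_cold", ["bob"]).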
def parse_rule_representation(rule_rep):
"""Function that takes string containing a rule in RuleTaker format and creates
a Rule. E.g. input:
(((\"something\" \"needs\" \"cow\" \"+\")) -> (\"something\" \"is\" \"red\" \"+\"))
"""
rule = None
rule_rep = rule_rep.strip()
# Remove enclosing parens ()
rule_txt = rule_rep[1:-1]
rule_parts = rule_txt.split("->")
if len(rule_parts) == 2:
# LHS is enclosed in parens. Remove ().
lhs = rule_parts[0].strip()[1:-1]
rhs = rule_parts[1]
lhs_facts = []
lhs_parts = []
for m in re.finditer(r"\([^()]+\)", lhs):
lhs_part = m.group(0)
lhs_fact = parse_triple_representation(lhs_part)
if lhs_fact is not None:
lhs_facts.append(lhs_fact)
rhs_fact = parse_triple_representation(rhs)
rule = Rule(lhs_facts, rhs_fact)
return rule
def call_theorem_prover(
theorem_prover, instance_id, question_id, theory, assertion, gold_label
):
"""Function that takes a single theory/assertion example and runs it through the theorem prover
to obtain a label. Returns the obtained label, elapsed time to solve it, and exception returned
by the engine, if any.
"""
obtained_result = False
millisecs_elapsed = 0
print("=======ORIGINAL THEORY=========")
theory_as_txt = theory.program(theorem_prover)
print(theory_as_txt)
theory.preprocess(theorem_prover)
theory_as_txt = theory.program(theorem_prover)
if theorem_prover == "problog":
assertion_lf = assertion.logical_form(theorem_prover, False)
assertion_lf = f"query({assertion_lf})."
program = f"{theory_as_txt}\n{assertion_lf}"
print("=======PROGRAM FROM PREPROCESSED THEORY=========")
print(program)
print("=======EXPECTED LABEL=========")
print(f" {gold_label}")
start_millisecs = current_milli_time()
try:
lf = LogicFormula.create_from(program) # ground the program
dag = LogicDAG.create_from(lf) # break cycles in the ground program
sdd = SDD.create_from(dag)
result = sdd.evaluate()
end_millisecs = current_milli_time()
elapsed_millisecs = end_millisecs - start_millisecs
result_tuples = [(k, v) for k, v in result.items()]
obtained_result = result_tuples[0][1] != 0.0
return obtained_result, elapsed_millisecs, None
except (NegativeCycle, NonGroundProbabilisticClause, UnknownClause) as e:
end_millisecs = current_milli_time()
elapsed_millisecs = end_millisecs - start_millisecs
print(
f"!!!Encountered Exception at instance id {instance_id}, question id {question_id}: {e}"
)
obtained_result = assertion.polarity != "+"
        exception_name = f"{type(e).__module__}.{type(e).__name__}"
return obtained_result, elapsed_millisecs, exception_name
return obtained_result, elapsed_millisecs, None
def run_theorem_prover(theorem_prover, ip, ip_format, op, report_metrics):
"""Function that takes an input file, calls the theorem prover on every example and gets a label.
Results are written to output file. Metrics are tracked and reported if report_metrics is True.
"""
metrics = Metrics()
if ip_format == "current":
row_ix = 1
for ix, line in enumerate(ip.readlines()):
facts = []
rules = []
instance_json = json.loads(line)
instance = TheoryAssertionRepresentationWithLabel.from_json(instance_json)
if instance is not None:
for lf_str in instance.theory_statements:
statement = parse_statement(lf_str)
if isinstance(statement, Fact):
facts.append(statement)
elif isinstance(statement, Rule):
rules.append(statement)
else:
print(
f"Unable to parse statement {lf_str} in row {row_ix} of input jsonl file!"
)
assertion = parse_statement(instance.assertion_statement)
gold_label = instance.label
theory = Theory(facts, rules)
ix = str(row_ix)
(
engine_label,
elapsed_millisecs,
returned_exception,
) = call_theorem_prover(
theorem_prover, ix, ix, theory, assertion, gold_label
)
if report_metrics:
metrics.update(
gold_label, engine_label, returned_exception, elapsed_millisecs
)
instance_json["label"] = engine_label
json.dump(instance_json, op)
op.write("\n")
else:
print(f"Unexpected input file format in line no. {row_ix}")
row_ix += 1
else:
# Ruletaker Legacy Jsonl Format
for ix, line in enumerate(ip.readlines()):
facts = []
rules = []
instance = json.loads(line)
triples = instance["triples"]
ip_rules = instance.get("rules", [])
questions = instance["questions"]
for triple_key in triples:
triple_obj = triples[triple_key]
triple_rep = triple_obj["representation"]
fact = parse_triple_representation(triple_rep)
if fact is not None:
facts.append(fact)
for rule_key in ip_rules:
rule_obj = ip_rules[rule_key]
rule_rep = rule_obj["representation"]
rule = parse_rule_representation(rule_rep)
if rule is not None:
rules.append(rule)
theory = Theory(facts, rules)
for question_key in questions:
question_obj = questions[question_key]
question_rep = question_obj["representation"]
assertion = parse_triple_representation(question_rep)
gold_label = question_obj.get("answer", None)
(
engine_label,
elapsed_millisecs,
returned_exception,
) = call_theorem_prover(
theorem_prover, ix, question_key, theory, assertion, gold_label
)
if report_metrics:
metrics.update(
gold_label, engine_label, returned_exception, elapsed_millisecs
)
op_obj = {
**instance,
**({f"{theorem_prover}_label": engine_label}),
}
json.dump(op_obj, op)
op.write("\n")
if report_metrics:
metrics.report()
def main():
"""Tool that takes a collection of theory-assertion examples and runs them through a theorem prover.
Supported input format 1: Jsonl format with json objects represented as per the
`TheoryAssertionRepresentationWithLabel` class.
Sample:
{ "json_class": "TheoryAssertionRepresentation",
"theory_statements": [
"1.0::kind('Fiona').",
"1.0::rough('Dave').",
"1.0::smart('Dave').",
"1.0::quiet('Charlie').",
"1.0::kind('Dave').",
"1.0::white('Erin').",
"1.0::young(X) :- white(X).",
"1.0::smart(X) :- big(X), green(X).",
"1.0::kind(X) :- round(X), smart(X).",
"1.0::kind(X) :- quiet(X), round(X).",
"1.0::rough(X) :- round(X), red(X)."
"1.0::kind(X) :- quiet(X).", "1.0::furry(X) :- quiet(X), big(X)."
],
"assertion_statement": "query(1.0::young('Dave').)."
}
Supported input format 2: Ruletaker's legacy Jsonl format (for AI2's internal use with existing RuleTaker datasets)
Sample (there are additional fields not relevant and not shown here):
{ "id": "AttNoneg-D3-319", ...
"triples":{
"triple1":
"text":"Bob is cold.",
"representation":"(\"Bob\" \"is\" \"cold\" \"+\")"
},
"triple2": {
"text":"Erin is nice.",
"representation":"(\"Erin\" \"is\" \"nice\" \"+\")"
},
"triple3":{
"text":"Gary is nice.",
"representation":"(\"Gary\" \"is\" \"nice\" \"+\")"
},
"triple4":{
"text":"Harry is blue.",
"representation":"(\"Harry\" \"is\" \"blue\" \"+\")"
}
},
"rules":{
"rule1":{
"text":"Blue people are furry.",
"representation":"(((\"someone\" \"is\" \"blue\" \"+\")) -> (\"someone\" \"is\" \"furry\" \"+\"))"
},
"rule2":{
"text":"Nice people are furry.",
"representation":"(((\"someone\" \"is\" \"nice\" \"+\")) -> (\"someone\" \"is\" \"furry\" \"+\"))"
},
"rule3":{
"text":"Blue, big people are nice.",
"representation":"(((\"someone\" \"is\" \"blue\" \"+\") (\"someone\" \"is\" \"big\" \"+\"))
-> (\"someone\" \"is\" \"nice\" \"+\"))"
},
"rule4":{
"text":"If someone is cold then they are quiet.",
"representation":"(((\"someone\" \"is\" \"cold\" \"+\"))
-> (\"someone\" \"is\" \"quiet\" \"+\"))"},
}
},
"questions":{
"Q1":{
"question":"Erin is nice.",
"answer":true,
...
"representation":"(\"Erin\" \"is\" \"nice\" \"+\")"
},
"Q2":{
"question":"Gary is not nice.",
"answer":false,
...
"representation":"(\"Gary\" \"is\" \"nice\" \"-\")"
},
"Q3":{
"question":"Gary is furry.",
"answer":true,
"representation":"(\"Gary\" \"is\" \"furry\" \"+\")"
}
}
}
Output jsonl format: Same as above with an additional field "problog_label": <true|false>.
"""
parser = argparse.ArgumentParser(
description="Tool to run theories through a theorem prover."
)
parser.add_argument(
"--input-file",
required=True,
help="Input jsonl file in either the current format or the legacy RuleTaker Jsonl format",
)
parser.add_argument(
"--input-format",
choices=["current", "legacy"],
default="current",
help="Input file format",
)
parser.add_argument(
"--theorem-prover",
default="problog",
        help="Theorem proving engine to use. The only one supported right now is problog.",
)
parser.add_argument(
"--output-file",
required=True,
help="Output file containing the theorem prover's output for each theory-assertion instance input. \
            Output format will be the same as the input format, so this will be a jsonl file.",
)
parser.add_argument(
"--report-metrics",
action="store_true",
help="Flag that will cause metrics (accuracy against gold labels) to be tracked and reported",
)
args = parser.parse_args()
with open(args.input_file, "r") as ip, open(args.output_file, "w") as op:
run_theorem_prover(
args.theorem_prover, ip, args.input_format, op, args.report_metrics
)
if __name__ == "__main__":
main()
|
from django.db import models
import datetime
# Create your models here.
class Player(models.Model):
nickname = models.CharField(blank=False, max_length=50)
name = models.CharField(blank=True, default='', max_length=100)
class Map(models.Model):
name = models.CharField(blank=False, max_length=50)
    version = models.DateField(blank=True, default=datetime.date.today)
class Team(models.Model):
name = models.CharField(blank=False, max_length=50)
class Tournament(models.Model):
name = models.CharField(blank=False, max_length=200)
class Demo(models.Model):
team1 = models.ForeignKey(Team, related_name='team1')
team2 = models.ForeignKey(Team, related_name='team2')
context = models.ForeignKey(Tournament)
source = models.CharField(blank=False, default='', max_length=400)
class PositionData(models.Model):
demo = models.ForeignKey(Demo)
player = models.ForeignKey(Player)
timestamp = models.CharField(blank=True, default='', max_length=300)
notes = models.TextField(blank=True, default='', max_length=1000)
|
import wrangle
import pandas as pd
# define the urls for data
base_url = 'https://zenodo.org/record/1215899/files/2008_Mossong_POLYMOD_'
contact_common = 'contact_common.csv?download=1'
participant_common = 'participant_common.csv?download=1'
participant_extra = 'participant_extra.csv?download=1'
household_common = 'hh_common.csv?download=1'
# get the data into dataframes
df_contact_common = pd.read_csv(base_url + contact_common)
df_participant_common = pd.read_csv(base_url + participant_common)
df_participant_extra = pd.read_csv(base_url + participant_extra)
df_household_common = pd.read_csv(base_url + household_common)
# match household id with participant id
household_lookup = df_participant_common[['part_id', 'hh_id']].merge(df_household_common, on='hh_id')
household_lookup = household_lookup[['part_id', 'country', 'hh_size']]
# merge the tables
df_participant_temp = df_participant_common.merge(df_participant_extra, on='part_id')
df = df_contact_common.merge(df_participant_temp, on='part_id')
df = df.merge(household_lookup, on='part_id')
# use estimated age for missing contact age values
estimated_age = (df.cnt_age_est_min + df.cnt_age_est_max) / 2
estimated_age = estimated_age.fillna(0).astype(int)
df['contact_age'] = (df.cnt_age_exact.fillna(0) + estimated_age).astype(int)
# keep these cols
cols = ['part_id',
'part_gender',
'contact_age',
'part_age',
'country',
'hh_size',
'cnt_gender',
'cnt_home',
'cnt_work',
'cnt_school',
'cnt_transport',
'cnt_leisure',
'cnt_otherplace']
df = df[cols]
# convert string label values to multi-label columns
df = wrangle.col_to_multilabel(df, 'part_gender')
df = wrangle.col_to_multilabel(df, 'country')
# drop redundant columns
df.drop(columns=['cnt_gender'], inplace=True)
# use these column names instead
cols = ['participant_id',
'contact_age',
'age_group',
'household_size',
'contact_home',
'contact_work',
'contact_school',
'contact_transport',
'contact_leisure',
'contact_other',
'gender_female',
'gender_male',
'country_be',
'country_de',
'country_fi',
'country_gb',
'country_it',
'country_lu',
'country_nl',
'country_pl']
# wrap up
df.columns = cols
_age_temp_ = df_participant_common[['part_id', 'part_age']]
df = df.merge(_age_temp_, left_on='participant_id', right_on='part_id')
# final cleanup
_drop_cols_ = ['age_group', 'part_id']
df = wrangle.col_move_place(df, 'part_age')
df = wrangle.col_move_place(df, 'participant_id')
df = wrangle.df_rename_col(df, 'part_age', 'participant_age')
df.drop(columns=_drop_cols_, inplace=True)
df = df.dropna()
df = df.astype(int)
df.to_csv('polymod_social_contact_data.csv', index=False)
|
import yaml
import numpy as np
from interpolation.splines import UCGrid, CGrid, nodes
from interpolation.splines import filter_cubic, eval_cubic
from mldftdat.dft.xc_models import NormGPFunctional
from sklearn.gaussian_process.kernels import RBF
from itertools import combinations
from argparse import ArgumentParser
from joblib import load, dump
from mldftdat.scripts.train_gp import parse_dataset
from mldftdat.models.kernels import *
"""
Script for mapping a CIDER GP to a cubic spline.
Requires an input DFTGPR object, stored in joblib format.
"""
def get_dim(x, length_scale, density=6, buff=0.0, bound=None, max_ngrid=None):
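    """
    Return a (lower, upper, npts) tuple describing a uniform grid over the data in x
    (optionally padded by buff, or overridden by an explicit bound), with roughly
    `density` grid points per kernel length scale, at least 3 points and at most max_ngrid.
    """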
print(length_scale, bound)
mini = np.min(x) - buff
maxi = np.max(x) + buff
if bound is not None:
mini, maxi = bound[0], bound[1]
ran = maxi - mini
ngrid = max(int(density * ran / length_scale) + 1, 3)
if max_ngrid is not None and ngrid > max_ngrid:
ngrid = max_ngrid
return (mini, maxi, ngrid)
def get_mapped_gp_evaluator(gpr, test_x=None, test_y=None, test_rho_data=None,
srbf_density=8, arbf_density=8, max_ngrid=120):
X = gpr.X
alpha = gpr.gp.alpha_
D = X[:,1:]
y = gpr.y
n = gpr.args.agpr_nsingle
N = D.shape[1]
print(gpr.gp.kernel_)
dims = []
if n == 0:
arbf = gpr.gp.kernel_.k1
srbf = None
ndim, length_scale, scale, order = arbf_args(arbf)
elif n == 1:
arbf = gpr.gp.kernel_.k1.k2
srbf = gpr.gp.kernel_.k1.k1
ndim, length_scale, scale, order = arbf_args(arbf)
length_scale = np.append(srbf.length_scale, length_scale)
dims = [get_dim(D[:,0], length_scale[0], density=srbf_density,
bound=gpr.feature_list[0].bounds,
max_ngrid=max_ngrid)]
elif n > 1:
arbf = gpr.gp.kernel_.k1.k2
srbf = gpr.gp.kernel_.k1.k1
ndim, length_scale, scale, order = arbf_args(arbf)
length_scale = np.append(srbf.length_scale, length_scale)
for i in range(n):
dims.append(get_dim(D[:,i], length_scale[i], density=srbf_density,
bound=gpr.feature_list[i].bounds,
max_ngrid=max_ngrid))
scale = np.array(scale)
for i in range(n,N):
dims.append(get_dim(D[:,i], length_scale[i],
density=arbf_density, bound=gpr.feature_list[i].bounds,
max_ngrid=max_ngrid))
grid = [np.linspace(dims[i][0], dims[i][1], dims[i][2])\
for i in range(N)]
k0s = []
print(length_scale)
for i in range(D.shape[1]):
print(length_scale[i], np.min(D[:,i]), np.max(D[:,i]))
diff = (D[:,i:i+1] - grid[i][np.newaxis,:]) / length_scale[i]
k0s.append(np.exp(-0.5 * diff**2))
funcps = []
spline_grids = []
ind_sets = []
if srbf is not None:
srbf_inds = np.arange(n).tolist()
arbf_inds = np.arange(n, N).tolist()
assert order + n <= 4, 'Max order too high, must be at most 4'
const = 0
for o in range(order+1):
for inds in combinations(arbf_inds, o):
if srbf is None:
inds = list(inds)
else:
inds = list(srbf_inds) + list(inds)
if len(inds) == 0:
const += np.sum(alpha)
elif len(inds) == 1:
funcps.append(np.dot(alpha, k0s[inds[0]]))
spline_grids.append(UCGrid(dims[inds[0]]))
ind_sets.append(tuple(inds))
elif len(inds) == 2:
k = np.einsum('ni,nj->nij', k0s[inds[0]], k0s[inds[1]])
spline_grids.append(UCGrid(dims[inds[0]], dims[inds[1]]))
funcps.append(np.einsum('n,nij->ij', alpha, k))
ind_sets.append(tuple(inds))
elif len(inds) == 3:
k = np.einsum('ni,nj,nk->nijk', k0s[inds[0]], k0s[inds[1]],
k0s[inds[2]])
funcps.append(np.einsum('n,nijk->ijk', alpha, k))
spline_grids.append(UCGrid(dims[inds[0]], dims[inds[1]],
dims[inds[2]]))
ind_sets.append(tuple(inds))
elif len(inds) == 4:
k = np.einsum('ni,nj,nk,nl->nijkl', k0s[inds[0]], k0s[inds[1]],
k0s[inds[2]], k0s[inds[3]])
funcps.append(np.einsum('n,nijkl->ijkl', alpha, k))
spline_grids.append(UCGrid(dims[inds[0]], dims[inds[1]],
dims[inds[2]], dims[inds[3]]))
ind_sets.append(tuple(inds))
else:
raise ValueError('Order too high!')
coeff_sets = []
for i in range(len(funcps)):
coeff_sets.append(filter_cubic(spline_grids[i], funcps[i]))
evaluator = NormGPFunctional(scale, ind_sets, spline_grids, coeff_sets,
gpr.xed_y_converter, gpr.feature_list,
gpr.desc_order, const=const,
desc_version=gpr.desc_version,
a0=gpr.a0, fac_mul=gpr.fac_mul,
amin=gpr.amin)
if n == 1 and order == 2:
res, en = arbf(X, get_sub_kernels=True)
resg = srbf(X)
res = np.dot(alpha, arbf.scale[0] * resg)
print("Comparing K and Kspline!!!")
print(en[0])
print(spline_grids[0][0], coeff_sets[0], ind_sets)
tsp = eval_cubic(spline_grids[0],
coeff_sets[0],
X[:,1:2])
diff = (D[:,:1] - D[:,:1].T) / length_scale[0]
tk = np.exp(-0.5 * diff**2)
print(np.mean(np.abs(srbf(X) - tk)))
print(np.mean(np.abs(res - evaluator.predict_from_desc(D, max_order=1))))
print(np.mean(np.abs(res - arbf.scale[0] * tsp)))
print(evaluator.scale[0], scale[0], arbf.scale[0])
print(res[::1000])
print(tsp[::1000])
print("checked 1d")
print(np.mean(res - evaluator.predict_from_desc(D, max_order=1)))
res += np.dot(alpha, arbf.scale[1] * en[1] * resg)
print(np.mean(np.abs(res - evaluator.predict_from_desc(D, max_order=2, vec_eval=True)[0])))
#print(np.mean(np.abs(res - evaluator.predict_from_desc(D, max_order=2))))
res += np.dot(alpha, arbf.scale[2] * en[2] * resg)
print(np.mean(np.abs(res - evaluator.predict_from_desc(D, max_order=3, vec_eval=True)[0])))
#print(np.mean(np.abs(res - evaluator.predict_from_desc(D, max_order=3))))
ytest = gpr.gp.predict(X)
ypred = evaluator.predict_from_desc(D)
print(np.mean(np.abs(ytest - ypred)))
print(np.mean(np.abs(ytest - y)))
print(np.mean(np.abs(y - ypred)))
print(np.linalg.norm(ytest - y))
print(np.linalg.norm(ypred - y))
print(ytest.shape)
if test_x is not None:
ytest = gpr.xed_to_y(gpr.predict(test_x, test_rho_data), test_rho_data)
ypred = gpr.xed_to_y(evaluator.predict(test_x, test_rho_data), test_rho_data)
print(np.max(np.abs(ytest - ypred)))
print(np.max(np.abs(ytest - test_y)))
print(np.max(np.abs(test_y - ypred)))
print()
print(np.linalg.norm(ytest - test_y))
print(np.mean(np.abs(ytest - test_y)))
print(np.mean(ytest - test_y))
print(np.linalg.norm(ypred - test_y))
print(np.mean(np.abs(ypred - test_y)))
print(np.mean(ypred - test_y))
print(np.linalg.norm(ypred - ytest))
print(np.mean(np.abs(ypred - ytest)))
print(np.mean(ypred - ytest))
return evaluator
def main():
parser = ArgumentParser()
parser.add_argument('outname', type=str, help='Spline output filename')
parser.add_argument('fname', type=str, help='GP filename (model to map)')
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('--basis', default='def2-qzvppd', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--srbfd', default=8, type=int,
help='grid density for reduced gradient descriptor, srbfd pts per stddev of feature')
parser.add_argument('--arbfd', default=8, type=int,
help='grid density for other descriptors, arbfd pts per stddev of feature')
parser.add_argument('--maxng', default=120, type=int,
help='maximum number of grid points for a feature')
#srbf_density=8, arbf_density=8, max_ngrid=120
args = parser.parse_args()
print('OUTNAME', args.outname)
gpr = load(args.fname)
assert len(gpr.args.validation_set) % 2 == 0,\
'Need pairs of entries for datasets list.'
nv = 0#len(gpr.args.validation_set) // 2
if nv != 0:
Xv, yv, rhov, rho_datav = parse_dataset(gpr.args, 0, val=True)
for i in range(1, nv):
Xn, yn, rhon, rho_datan, = parse_dataset(gpr.args, i, val=True)
Xv = np.append(Xv, Xn, axis=0)
yv = np.append(yv, yn, axis=0)
rhov = np.append(rhov, rhon, axis=0)
rho_datav = np.append(rho_datav, rho_datan, axis=1)
if nv == 0:
evaluator = get_mapped_gp_evaluator(gpr, srbf_density=args.srbfd,
arbf_density=args.arbfd,
max_ngrid=args.maxng)
else:
evaluator = get_mapped_gp_evaluator(gpr, test_x=Xv, test_y=yv,
test_rho_data=rho_datav,
srbf_density=args.srbfd,
arbf_density=args.arbfd,
max_ngrid=args.maxng)
evaluator.args = gpr.args
evaluator.fx_baseline = gpr.xed_y_converter[2]
evaluator.fxb_num = gpr.xed_y_converter[3]
evaluator.desc_version = gpr.args.version
evaluator.amin = gpr.amin
evaluator.a0 = gpr.a0
evaluator.fac_mul = gpr.fac_mul
dump(evaluator, args.outname)
if __name__ == '__main__':
main()
|
import typing
import json
import uuid
from copy import deepcopy
from lib.crypto.keystore import KeyStore
from ids.models import Id
def create_verifiable_presentation(
id: Id,
attribute_groups: typing.Set[str],
password: str,
entropy: str,
) -> dict:
# Initialize the keystore
keypair = id.owner.keypair
keystore = KeyStore(
(keypair.public_key, keypair.private_key),
password,
)
# Get the required attr groups
result = {
"id": {}
}
for attr in ["idType", "idName", "issuer"]:
val = id.verifiable_id["data"].get(attr, "")
result["id"][attr] = deepcopy(val)
result["id"]["groups"] = []
for group in id.verifiable_id["data"]["groups"]:
group_name = group["data"]["groupName"]
if group_name in attribute_groups:
result["id"]["groups"].append(deepcopy(group))
result["entropy"] = uuid.uuid4().hex + "-" + entropy
# Minify json and sign
data = json.dumps(result, separators=(",", ":"))
sign = keystore.sign(data)
return {
"data": result,
"signature": sign,
}
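# Illustrative call (assumes an `Id` instance whose owner has a keypair and whose
# verifiable_id holds a "groups" list; all names below are placeholders):
#   vp = create_verifiable_presentation(my_id, {"personal"}, password="pw", entropy="nonce")
#   vp["data"]["id"]["groups"]   # only the groups named in `attribute_groups`
#   vp["signature"]              # signature over the minified JSON of vp["data"]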
|
"""
Interactive command tool for chaperone
Usage:
telchap <command> [<args> ...]
"""
# perform any patches first
import chaperone.cutil.patches
# regular code begins
import sys
import os
import asyncio
import shlex
from docopt import docopt
from chaperone.cproc.client import CommandClient
from chaperone.cproc.version import VERSION_MESSAGE
def main_entry():
options = docopt(__doc__, options_first=True, version=VERSION_MESSAGE)
try:
result = CommandClient.sendCommand(options['<command>'] + " " + " ".join([shlex.quote(a) for a in options['<args>']]))
except (ConnectionRefusedError, FileNotFoundError) as ex:
result = "chaperone does not seem to be listening, is it running?\n(Error is: {0})".format(ex)
print(result)
|
from . import views
from django.urls import path
urlpatterns = [
path('', views.perfil, name='perfil-usuario'),
path('alterar-informacoes/<int:id>/', views.alterar_informacoes, name='alterar-informacoes'),
path('excluir-animal/<int:id_animal>/', views.excluir_animal, name='excluir-animal'),
path('editar-animal/<int:id_animal>/', views.editar_animal, name='editar-animal'),
path('animal/<int:id>/', views.perfil_animal, name='perfil-animal'),
path('excluir-conta/', views.excluir_conta, name='excluir-conta'),
]
|
raise NotImplementedError("hmac is not yet implemented in Skulpt")
|
from collections import OrderedDict
from enum import Enum
import joblib
import numpy as np
from ptype.utils import project_root
TYPE_INDEX = 0
MISSING_INDEX = 1
ANOMALIES_INDEX = 2
def _get_unique_vals(col, return_counts=False):
"""List of the unique values found in a column."""
return np.unique([str(x) for x in col.tolist()], return_counts=return_counts)
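# Illustrative behaviour (values are stringified before deduplication; pandas is shown only
# for the example, any object with a .tolist() method works):
#   _get_unique_vals(pd.Series(["a", "b", "a"]), return_counts=True)
#   -> (array(['a', 'b'], dtype='<U1'), array([2, 1]))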
# Use same names and values as the constants in Model.py. Could consolidate.
class _Status(Enum):
TYPE = 1
MISSING = 2
ANOMALOUS = 3
class _Feature(Enum):
U_RATIO = 5
U_RATIO_CLEAN = 6
U = 7
U_CLEAN = 8
class Column:
def __init__(self, series, p_t, p_z):
self.series = series
self.p_t = p_t
self.p_t_canonical = {}
self.p_z = p_z
self.type = self.inferred_type()
self.unique_vals, self.unique_vals_counts = _get_unique_vals(
self.series, return_counts=True
)
self._initialise_missing_anomalies()
def __repr__(self):
return repr(self.__dict__)
def _initialise_missing_anomalies(self):
row_posteriors = self.p_z[self.type]
max_row_posterior_indices = np.argmax(row_posteriors, axis=1)
self.normal_indices = list(np.where(max_row_posterior_indices == TYPE_INDEX)[0])
self.missing_indices = list(
np.where(max_row_posterior_indices == MISSING_INDEX)[0]
)
self.anomalous_indices = list(
np.where(max_row_posterior_indices == ANOMALIES_INDEX)[0]
)
def inferred_type(self):
"""Get most likely inferred type for the column."""
return max(self.p_t, key=self.p_t.get)
def get_normal_ratio(self):
"""Get proportion of unique values in the column which are considered neither anomalous nor missing."""
return round(
sum(self.unique_vals_counts[self.normal_indices])
/ sum(self.unique_vals_counts),
2,
)
def get_na_ratio(self):
"""Get proportion of unique values in the column which are considered 'missing'."""
return round(
sum(self.unique_vals_counts[self.missing_indices])
/ sum(self.unique_vals_counts),
2,
)
def get_an_ratio(self):
"""Get proportion of unique values in the column which are considered 'anomalous'."""
return round(
sum(self.unique_vals_counts[self.anomalous_indices])
/ sum(self.unique_vals_counts),
2,
)
def get_normal_values(self):
"""Get list of all values in the column which are considered neither anomalous nor missing."""
return list(self.unique_vals[self.normal_indices])
def get_na_values(self):
"""Get a list of the values in the column which are considered 'missing'."""
return list(self.unique_vals[self.missing_indices])
def get_an_values(self):
"""Get a list of the values in the column which are considered 'anomalous'."""
return list(self.unique_vals[self.anomalous_indices])
def reclassify(self, new_t):
"""Assign a different type to the column, and adjust the interpretation of missing/anomalous values
accordingly.
:param new_t: the new type, which must be one of the types known to ptype.
"""
if new_t not in self.p_z:
raise Exception(f"Type {new_t} is unknown.")
self.type = new_t
self._initialise_missing_anomalies()
def _get_features(self, counts):
posterior = OrderedDict()
for t, p in sorted(self.p_t.items()):
# aggregate date subtypes
t_0 = t.split("-")[0]
if t_0 in posterior.keys():
posterior[t_0] += p
else:
posterior[t_0] = p
posterior = posterior.values()
entries = [str(int_element) for int_element in self.series.tolist()]
U = len(np.unique(entries))
U_clean = len(self.normal_indices)
N = len(entries)
N_clean = sum([counts[index] for index in self.normal_indices])
u_ratio = U / N
if U_clean == 0 and N_clean == 0:
u_ratio_clean = 0.0
else:
u_ratio_clean = U_clean / N_clean
return np.array(list(posterior) + [u_ratio, u_ratio_clean, U, U_clean])
|
import numpy as np
import Op, Interface
import ISCV
from GCore import Recon
class PointsFromDetections(Op.Op):
def __init__(self, name='/Reconstruct 3D from Dets', locations='', calibration='', tiltThreshold=0.0002, x2dThreshold=0.01,
x3dThreshold=30.0, minRays=3, seedX3ds='', showContributions=True, pointSize=8.0, colour=(1.0, 0.5, 0.0, 0.7),
setLabels=False, mesh='', visibilityLod='', intersection_threshold=100., generateNormals=False, frameRange=''):
fields = [
('name', 'Name', 'Name', 'string', name, {}),
('locations', 'Detection locations', 'Detection locations', 'string', locations, {}),
('calibration', 'Calibration location', 'Calibration location', 'string', calibration, {}),
('tilt_threshold', 'Tilt threshold', 'Slack factor for tilt pairing', 'float', tiltThreshold, {}),
('x2d_threshold', 'Detection threshold', 'Detections threshold', 'float', x2dThreshold, {}),
('x3d_threshold', '3D threshold', '3D threshold', 'float', x3dThreshold, {}),
('min_rays', 'Min. number of rays', 'Minimum number of rays', 'int', minRays, {}),
('seed_x3ds', '3D seed location', 'Existing 3D seed location', 'string', seedX3ds, {}),
('show_contributions', 'Show contributions', 'Show camera contributions', 'bool', showContributions, {}),
('pointSize', '3D Point size', '3D Point size', 'float', pointSize, {}),
('colour', '3D Point colour', '3D Point colour', 'string', str(colour), {}),
('setLabels', 'Set labels', 'Set labels', 'bool', setLabels, {}),
('mesh', 'Mesh', 'Mesh location', 'string', mesh, {}),
('visibilityLod', 'Visibility LOD location', 'Visibility LOD location', 'string', visibilityLod, {}),
('intersection_threshold', 'Intersection threshold', 'Intersection threshold', 'float', intersection_threshold, {}),
('generateNormals', 'Generate normals', 'Generate normals for visibility checks', 'bool', generateNormals, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, fields)
self.visibility = None
def flush(self):
self.visibility = None
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
calibrationLocation = attrs['calibration']
if not calibrationLocation: calibrationLocation = interface.root()
# Get the mats from the calibration location
mats = interface.attr('mats', atLocation=calibrationLocation)
if mats is None:
self.logger.error('Attribute mats not found at: %s' % calibrationLocation)
return
Ps = interface.attr('Ps', atLocation=calibrationLocation)
if Ps is None:
Ps = np.array([m[2] / (np.sum(m[2][0, :3] ** 2) ** 0.5) for m in mats], dtype=np.float32)
# Get the detections from the location we are cooking
# x2ds = interface.attr('x2ds')
# x2ds_splits = interface.attr('x2ds_splits')
x2ds = interface.attr('x2ds')
x2ds_splits = interface.attr('x2ds_splits')
x2ds_bright = interface.attr('x2ds', atLocation='/root/cameras/bright')
x2ds_bright_splits = interface.attr('x2ds_splits', atLocation='/root/cameras/bright')
if x2ds is None or x2ds_splits is None:
self.logger.error('Detections not found at: %s' % location)
return
# Get configuration parameters
tilt_threshold = attrs['tilt_threshold']
x2d_threshold = attrs['x2d_threshold']
x3d_threshold = attrs['x3d_threshold']
min_rays = attrs['min_rays']
seed_x3ds_location = attrs['seed_x3ds']
seed_x3ds = None
if min_rays < 2:
self.logger.error('You need at least 2 rays but you specified the minimum to be: %d' % min_rays)
return
if seed_x3ds_location:
seed_x3ds = interface.attr('x3ds', atLocation=seed_x3ds_location)
if self.visibility is None: self.visibility = ISCV.ProjectVisibility.create()
# Check if we have normals
if attrs['mesh'] and interface.hasAttr('normals', atLocation=attrs['mesh']):
normals = interface.attr('normals', atLocation=attrs['mesh'])
self.visibility.setNormals(normals)
# Check if we have visibility LODs
if 'visibilityLod' in attrs and attrs['visibilityLod']:
visibilityLod = interface.location(attrs['visibilityLod'])
if visibilityLod is not None:
lodTris = visibilityLod['tris']
lodVerts = visibilityLod['verts']
lodNormals = visibilityLod['faceNormals']
tris = lodVerts[lodTris]
cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
self.visibility.setLods(tris, cameraPositions, np.concatenate((lodNormals)),
attrs['intersection_threshold'], attrs['generateNormals'])
# Calculate the 3D reconstructions from the detections
x3ds, labels, _, _ = Recon.intersect_rays(x2ds, x2ds_splits, Ps, mats, seed_x3ds=seed_x3ds, tilt_threshold=tilt_threshold,
x2d_threshold=x2d_threshold, x3d_threshold=x3d_threshold, min_rays=min_rays,
numPolishIts=3, forceRayAgreement=True,
visibility=self.visibility)
if not x3ds.any() or not labels.any(): return
x3ds_labels = np.arange(np.max(labels) + 1)
if attrs['setLabels']:
interface.setAttr('labels', labels)
else:
interface.setAttr('labels', [])
# Find which cameras contribute to the 3D reconstructions (optional?)
cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
cameraContributions = {}
for label3d in x3ds_labels:
camIds = [interface.findCameraIdFromRayId(rayId, x2ds_splits) for rayId in np.where(labels == label3d)[0]]
cameraContributions[label3d] = camIds
# Create 3D points attributes on the cooked location
pAttrs = {
'x3ds': x3ds,
'x3ds_labels': x3ds_labels,
'x3ds_colour': eval(attrs['colour']),
'x3ds_pointSize': attrs['pointSize'],
'cameraContributions': cameraContributions,
'showCameraContributions': attrs['show_contributions'],
'cameraPositions': cameraPositions
}
interface.createChild('reconstructed', 'points3d', attrs=pAttrs)
class PointsFromDetectionsAll(Op.Op):
def __init__(self, name='/PointsFromDetectionsAll', locations='', calibration='', tiltThreshold=0.0002,
pointSize=8.0, colour=(1.0, 0.5, 0.0, 0.7), mesh='', visibilityLod='',
intersection_threshold=100., generateNormals=True, frameRange=''):
fields = [
('name', 'Name', 'Name', 'string', name, {}),
('locations', 'Detection locations', 'Detection locations', 'string', locations, {}),
('calibration', 'Calibration location', 'Calibration location', 'string', calibration, {}),
('tilt_threshold', 'Tilt threshold', 'Slack factor for tilt pairing', 'float', tiltThreshold, {}),
('pointSize', '3D Point size', '3D Point size', 'float', pointSize, {}),
('colour', '3D Point colour', '3D Point colour', 'string', str(colour), {}),
('mesh', 'Mesh', 'Mesh location', 'string', mesh, {}),
('visibilityLod', 'Visibility LOD location', 'Visibility LOD location', 'string', visibilityLod, {}),
('intersection_threshold', 'Intersection threshold', 'Intersection threshold', 'float', intersection_threshold, {}),
('generateNormals', 'Generate normals', 'Generate normals for visibility checks', 'bool', generateNormals, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, fields)
self.visibility = None
def flush(self):
self.visibility = None
def calculate3dPointsFromDetections(self, x2ds, splits, mats, Ps=None, tilt_threshold=0.0002):
import itertools
Ts = np.array(zip(*mats)[4],dtype=np.float32)
if Ps is None:
Ps = np.array([m[2] / (np.sum(m[2][0, :3] ** 2) ** 0.5) for m in mats], dtype=np.float32)
numCameras = len(splits) - 1
E = ISCV.compute_E(x2ds, splits, Ps)
rays = Recon.dets_to_rays(x2ds, splits, mats)
cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
data = []
def norm(a):
return a / (np.sum(a ** 2) ** 0.5)
tilt_axes = np.array([norm(np.dot([-m[0][0, 2], -m[0][1, 2], m[0][0, 0]], m[1][:3, :3])) for m in mats], dtype=np.float32)
# Create all combinations of ci < cj
cameraPairCombinations = np.array(list(itertools.combinations(range(numCameras), 2)), dtype=np.int32)
knownCamPairs = [
(7, 12), (5, 9), (3, 9), (4, 12), (7, 10), (8, 12), (0, 9), (3, 4), (1, 9), (2, 7), (1, 2), (0, 11),
(5, 11), (1, 3), (2, 12), (9, 10), (10, 12), (7, 8), (9, 12), (4, 10), (11, 12), (6, 10), (6, 9),
(8, 10), (3, 6), (0, 7), (4, 9), (1, 7), (0, 5), (2, 4), (1, 10), (5, 7), (3, 12), (4, 6), (2, 11),
(3, 7), (3, 10), (4, 8), (4, 11), (0, 1), (5, 12), (1, 6), (7, 11), (2, 3), (2, 8), (1, 4), (1, 8),
(0, 8), (6, 7), (1, 11), (8, 9), (0, 10), (10, 11), (9, 11), (5, 10), (0, 12), (3, 5), (8, 11),
(0, 3), (5, 8), (7, 9), (6, 11), (6, 12), (1, 5), (6, 8), (3, 8), (0, 6), (2, 5), (0, 4), (5, 6),
(1, 12), (4, 7), (2, 6), (2, 10), (4, 5), (3, 11), (0, 2), (2, 9)
]
# Find valid pairs of camera rays that could intersect and create a 3D reconstruction
for ci, cj in cameraPairCombinations:
# for (ci, cj) in knownCamPairs:
ui, uj = range(splits[ci], splits[ci + 1]), range(splits[cj], splits[cj + 1])
if len(ui) == 0 or len(uj) == 0: continue
axis = cameraPositions[cj] - cameraPositions[ci]
camPairDist = np.linalg.norm(axis)
if camPairDist > 7000.: continue
tilt_i = np.dot(map(norm, np.cross(rays[ui], axis)), tilt_axes[ci])
tilt_j = np.dot(map(norm, np.cross(rays[uj], axis)), tilt_axes[ci]) # NB tilt_axes[ci] not a bug
io = np.argsort(tilt_i)
jo = np.argsort(tilt_j)
for ii, d0 in enumerate(tilt_i[io]):
for ji, d1 in enumerate(tilt_j[jo]):
diff = d0 - d1
if abs(diff) < tilt_threshold:
d = [int(ui[io[ii]]), int(uj[jo[ji]])]
cams = [int(ci), int(cj)]
entry = {'pair': d, 'cameraIds': cams}
data.append(entry)
# Create 3D reconstructions from ray pairs
x3ds = []
for entry in data:
d = entry['pair']
ci, cj = entry['cameraIds']  # use this pair's cameras for the angle test below, not the leftovers from the enumeration loop
E0, e0 = E[d, :, :3].reshape(-1, 3), E[d, :, 3].reshape(-1)
x3d = np.linalg.solve(np.dot(E0.T, E0) + np.eye(3) * 1e-7, -np.dot(E0.T, e0))
ai, aj = x3d - Ts[ci], x3d - Ts[cj]
angle = np.degrees(np.arccos(np.dot(ai, aj) / (np.linalg.norm(ai) * np.linalg.norm(aj))))
if angle > 120: continue
x3ds.append(x3d)
return x3ds, data, rays, cameraPositions
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
calibrationLocation = attrs['calibration']
if not calibrationLocation: calibrationLocation = interface.root()
# Get the mats from the calibration location
mats = interface.attr('mats', atLocation=calibrationLocation)
if mats is None: return
Ps = interface.attr('Ps', atLocation=calibrationLocation)
# Get the detections from the location we are cooking
x2ds = interface.attr('x2ds')
x2ds_splits = interface.attr('x2ds_splits')
if self.visibility is None: self.visibility = ISCV.ProjectVisibility.create()
# Check if we have normals
if attrs['mesh'] and interface.hasAttr('normals', atLocation=attrs['mesh']):
normals = interface.attr('normals', atLocation=attrs['mesh'])
self.visibility.setNormals(normals)
# Check if we have visibility LODs
if 'visibilityLod' in attrs and attrs['visibilityLod']:
visibilityLod = interface.location(attrs['visibilityLod'])
if visibilityLod is not None:
lodTris = visibilityLod['tris']
lodVerts = visibilityLod['verts']
lodNormals = visibilityLod['faceNormals']
tris = lodVerts[lodTris]
cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
self.visibility.setLods(tris, cameraPositions, np.concatenate((lodNormals)),
attrs['intersection_threshold'], attrs['generateNormals'])
# Calculate the 3D reconstructions from the detections
x3ds, reconstructionData, rays, cameraPositions = self.calculate3dPointsFromDetections(x2ds, x2ds_splits, mats, Ps,
tilt_threshold=attrs['tilt_threshold'])
x3ds = np.array(x3ds, dtype=np.float32)
labellingData = LabelData((x2ds, x2ds_splits), reconstructionData, rays, cameraPositions, mats)
x3ds_means, x3ds_labels, x3ds_normals, _, _ = cleanAndLabelX3ds(labellingData, x3ds, range(5, 3, -1), visibility=self.visibility)
if x3ds_means is None or not x3ds_means.any(): return
# Create 3D points attributes on the cooked location
# interface.setAttr('x3ds', x3ds)
interface.setAttr('x3ds', x3ds_means)
interface.setAttr('x3ds_colour', eval(attrs['colour']))
interface.setAttr('x3ds_pointSize', attrs['pointSize'])
interface.setAttr('x3ds_labels', x3ds_labels)
interface.setType('points')
# Test
class LabelData:
def __init__(self, data1, reconstructionData, rays, cameraPositions, mats):
self.labels = -np.ones(len(rays), dtype=np.int32)
self.labelPositions = {}
self.x3d_threshold = 10.0
self.x2ds, self.splits = data1
self.reconstructionData = reconstructionData
self.rays = rays
self.originalRays = rays
self.cameraPositions = cameraPositions
self.mats = mats
self.labels_temp = -np.ones_like(self.labels)
self.labelPositions_temp = {}
self.Ps = np.array([m[2] / (np.sum(m[2][0, :3] ** 2) ** 0.5) for m in mats], dtype=np.float32)
self.triangles = []
self.points = np.array([])
self.pointLabels = None
self.pointNormals = None
self.cloud = None
self.scores, self.matches, self.matches_splits = [], [], []
def getX3ds(self, tempLabels=False):
if tempLabels:
return self.solveLabels_temp()
self.solveLabels()
return self.points, self.pointLabels, self.pointNormals
def getClusterData(self, x3ds):
if self.cloud is None:
cloud = ISCV.HashCloud3D(x3ds, self.x3d_threshold)
self.scores, self.matches, self.matches_splits = cloud.score(x3ds)
return self.scores, self.matches, self.matches_splits
def solveLabels(self):
self.points, self.pointLabels, self.pointNormals, E, x2d_labels = Recon.solve_x3ds_normals(self.x2ds,
self.splits,
self.labels,
self.Ps,
self.rays)
print "solveLabels:", len(self.points), np.min(self.pointLabels), np.max(self.pointLabels), "(#points | min label | max label)"
def solveLabels_temp(self):
points, pointLabels, pointNormals, E, x2d_labels = Recon.solve_x3ds_normals(self.x2ds,
self.splits,
self.labels_temp,
self.Ps,
self.rays)
print "solveLabels:", len(points), np.min(pointLabels), np.max(pointLabels), "(#points | min label | max label)"
return points, pointLabels, pointNormals
def approveLabels(self, approved_point_labels):
for label in approved_point_labels:
self.labels[np.where(self.labels_temp == label)[0]] = label # Surely this can be done better?
self.labels_temp = np.copy(self.labels)
def getMeshData(self, solve=False):
if solve:
self.getX3ds()
triangles = np.array(self.triangles, dtype=np.int32)
if triangles.any():
verts = self.points
tris = np.array(self.triangles, dtype=np.int32)
return tris, verts
return np.array([]), np.array([])
def findCameraIdFromRayId(rayId, camRaySplits):
dists = rayId - camRaySplits[:-1]
dists[np.where(dists < 0)[0]] = np.sum(camRaySplits)
return dists.argmin()
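# Illustrative: with splits = np.array([0, 3, 7, 9]) the rays are grouped per camera as
# [0..2], [3..6], [7..8], so findCameraIdFromRayId(5, np.array([0, 3, 7, 9])) returns 1.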
def cleanAndLabelX3ds(labellingData, x3ds, N, allowStealing=True, pts=np.array([]), visibility=None):
global cameraContributions, rayInfo
labels = labellingData.labels_temp
labelPositions = labellingData.labelPositions
x3d_threshold = labellingData.x3d_threshold
x2ds = labellingData.x2ds
splits = labellingData.splits
reconstructionData = labellingData.reconstructionData
rays = labellingData.rays
cameraPositions = labellingData.cameraPositions
# We want to get only the points that have N neighbours within 1cm
# TODO: Cache this as we'll be using it multiple times
#cloud = ISCV.HashCloud3D(x3ds, x3d_threshold)
#scores, matches, matches_splits = cloud.score(x3ds)
scores, matches, matches_splits = labellingData.getClusterData(x3ds)
#clusterMeanPoints = []
registry = []
x3ds_means = []
x3ds_normals = []
cameraContributions = []
# clusterCameraContributions = []
rawData = None
rayInfo = []
labelsAdded = []
#x2ds, splits = data1
#Ps = np.array([m[2] / (np.sum(m[2][0, :3] ** 2) ** 0.5) for m in mats], dtype=np.float32)
Ps = labellingData.Ps
volatileLabels = []
goldStandardLabels = []
for n in N:
#print ">> Min Rays:", n
whichMatches = np.where(matches_splits[1:] - matches_splits[:-1] >= n)[0]
clusterSplitPairs = np.array(zip(matches_splits[:-1], matches_splits[1:]))[whichMatches]
if n == N: rawData = x3ds[whichMatches]
clusterCounter = 0
x3ds_clusters = []
x3ds_clusterColours = []
x3ds_clusterMeans = []
x3ds_clusterMeansColours = []
x3ds_clusterLabels = []
for matchFrom, matchTo in clusterSplitPairs:
# Find the points for this cluster and calculate the mean position
pointIndices = matches[matchFrom:matchTo]
numPoints = len(pointIndices)
assert(numPoints >= n)
clusterMean = np.mean(x3ds[pointIndices], axis=0)
if len(np.where(np.linalg.norm(clusterMean - cameraPositions, axis=1) < x3d_threshold * 6.0)[0]) > 0:
continue
if pts.any():
if len(pts.shape) == 1:
dists = np.linalg.norm(clusterMean - pts)
else:
dists = np.linalg.norm(clusterMean - pts, axis=1)
if len(np.where(dists > x3d_threshold * 10.0)[0]) > 0:
continue
cluster = x3ds[pointIndices]
x3ds_clusters.extend(cluster)
randomColour = np.concatenate((np.random.rand(3), np.array([0.5], dtype=np.float32)))
x3ds_clusterColours.extend(np.tile(randomColour, (cluster.shape[0], 1)))
x3ds_clusterMeans.append(clusterMean)
x3ds_clusterMeansColours.append(randomColour)
x3ds_clusterLabels.append(clusterCounter)
# Get all the rays used to make the points in this cluster. This will be a Nx3 matrix
rayIndices = np.unique([reconstructionData[pi]['pair'] for pi in pointIndices])
pointRays = rays[rayIndices]
# Calculate the dot product for each combination of rays. This will be a NxN matrix
raysDps = np.dot(pointRays, pointRays.T)
# Find the ray which has the highest agreement with the others (sum of dot products)
bestRay = np.sum(raysDps > 0, axis=0).argmax()
# Find which other rays are in agreement with the best ray (dp > 0)
goodRays = np.where(raysDps[bestRay] > 0.05)[0]
# As all the (good) rays in the cluster should be contributing to creating a single point, we will
# give them a new label that identifies them with the detection/reconstruction for that point
#currentLabel = len(clusterMeanPoints)
currentLabel = len(labelPositions)
labelForPointReconstruction = currentLabel
# Only continue with rays from a unique set of cameras
camerasForRays = [findCameraIdFromRayId(rayId, splits) for rayId in rayIndices[goodRays]]
uniqueRayCams, uniqueRayCamsIdx = np.unique(camerasForRays, return_index=True)
goodRays = goodRays[uniqueRayCamsIdx]
rayInfo.append(raysDps[goodRays]) # TODO: Fix.. nonsense
existingLabelsForRays = labels[rayIndices[goodRays]]
knownLabelIndices = np.where(existingLabelsForRays != -1)[0]
rayIdsForKnownLabels = rayIndices[knownLabelIndices]
camerasForKnownLabels = [findCameraIdFromRayId(rayId, splits) for rayId in rayIdsForKnownLabels]
uniqueCams, uniqueCamsIdx = np.unique(camerasForKnownLabels, return_index=True)
knownLabelIndices = knownLabelIndices[uniqueCamsIdx]
knownLabels = existingLabelsForRays[knownLabelIndices]
clusterCounter += 1
# We check if any of the rays have been assigned a label before (i.e. they will contribute to
# reconstructing a 3D point). If that is the case then we have to decide whether we want
# the rays in this cluster to contribute to the existing label (reconstruction), or
# whether we want to steal the labelled rays so that they instead contribute to a new
# label for this cluster.
threshold = x3d_threshold ** 2
for label in np.unique(knownLabels):
# The ray has been labelled to create a 3D point. If that point is within threshold distance
# of the current cluster we give this cluster the same label. In essence we are merging the
# rays in this cluster with the rays that are already contributing to the label.
# However, if the reconstructed label and the cluster mean are further away from each other
# we will relabel it with the new label for this cluster which equates to stealing it.
#dist = np.linalg.norm(clusterMeanPoints[label] - clusterMean)
dist = np.linalg.norm(labelPositions[label] - clusterMean)
if dist < threshold:
labelForPointReconstruction = label
break
# threshold = dist
_clusterId, _clusterX3dId = len(labelPositions) - 1, len(x3ds_clusterMeans) - 1
# Label the rays with the new or existing (merged) label
useNewLabel = False
unknownLabels = np.where(existingLabelsForRays == -1)[0]
if labelForPointReconstruction == currentLabel:
# No merging is going on
if len(unknownLabels) > 0:
labels[rayIndices[goodRays][unknownLabels]] = currentLabel
useNewLabel = True
if allowStealing:
for knownLabel in knownLabelIndices:
rayIdsWithLabel = np.where(labels == existingLabelsForRays[knownLabel])[0]
numRaysForLabel = len(rayIdsWithLabel)
# if existingLabelsForRays[knownLabel] not in volatileLabels and numRaysForLabel < 3:
# if existingLabelsForRays[knownLabel] not in goldStandardLabels and numRaysForLabel < 3:
# if existingLabelsForRays[knownLabel] not in goldStandardLabels:
agreement = np.where(np.sum(np.dot(bestRay, rays[rayIdsWithLabel]) > 0, axis=1) > 1)[0]
if True:
labels[rayIndices[goodRays][knownLabel]] = currentLabel
useNewLabel = True
else:
# Employ merging strategy
if allowStealing:
rayIdsWithLabel = np.where(labels == labelForPointReconstruction)[0]
agreement = np.where(np.sum(np.dot(bestRay, rays[rayIdsWithLabel]) > 0, axis=1) > 1)[0]
labels[rayIndices[goodRays][unknownLabels]] = labelForPointReconstruction
for knownLabel in knownLabelIndices:
numRaysForLabel = len(np.where(labels == existingLabelsForRays[knownLabel])[0])
# if existingLabelsForRays[knownLabel] not in goldStandardLabels and numRaysForLabel < 3:
if existingLabelsForRays[knownLabel] not in goldStandardLabels:
labels[rayIndices[goodRays][knownLabel]] = currentLabel
useNewLabel = True
else:
labels[rayIndices[goodRays]] = labelForPointReconstruction
if useNewLabel:
labelPositions[currentLabel] = clusterMean
labelsAdded.append(currentLabel)
goldStandardLabels = np.where(labels != -1)[0]
if len(np.where(labels != -1)[0]) == 0:
return np.array([]), np.array([]), np.array([]), rawData, labelsAdded
# x3ds_means, x3ds_labels, _, _ = Recon.solve_x3ds(x2ds, splits, labels, Ps)
x3ds_means, x3ds_labels, x3ds_normals, _, _ = Recon.solve_x3ds_normals(x2ds, splits, labels, Ps, rays)
# x2d_threshold = 30. / 2000.
# clouds = ISCV.HashCloud2DList(x2ds, splits, x2d_threshold)
# _, labels, _ = clouds.project_assign_visibility(x3ds_means, None, Ps, x2d_threshold, visibility)
labellingData.labels = labels
usedLabels = np.array(np.where(labels != -1)[0], dtype=np.int32)
return x3ds_means, x3ds_labels, x3ds_normals, rawData, labelsAdded
# Register Ops
import Registry
Registry.registerOp('Reconstruct 3D from Dets', PointsFromDetections)
Registry.registerOp('Reconstruct 3D from Dets (all)', PointsFromDetectionsAll)
|
from etna.transforms.nn.pytorch_forecasting import PytorchForecastingTransform
|
__strict__ = True
import httpx
from core.config import Config
async def client_fetch(endpoint: str, payload: dict = None) -> dict:
payload.update({"key": Config.STEAM_API_KEY})
async with httpx.AsyncClient() as client:
result = await client.get("https://api.steampowered.com" + endpoint, params=payload, timeout=10)
result.raise_for_status()
return result.json()
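# Illustrative usage (the endpoint and parameters are placeholders, not verified here):
#   data = await client_fetch("/ISteamUser/GetPlayerSummaries/v2/", {"steamids": "7656..."})
# The API key from Config is merged into the query parameters before the request is sent.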
|
from __future__ import unicode_literals
from django.db import models
class Comment(models.Model):
"""Describes the comment"""
name = models.CharField(max_length=128)
userpic = models.URLField()
url = models.URLField()
text = models.TextField()
like = models.PositiveIntegerField(default=0)
dislike = models.PositiveIntegerField(default=0)
date = models.DateTimeField()
def __unicode__(self):
return 'Comment: {} (user: {})'.format(self.id, self.name)
class Reply(models.Model):
"""Describes the reply for comment"""
name = models.CharField(max_length=128)
userpic = models.URLField()
url = models.URLField()
text = models.TextField()
like = models.PositiveIntegerField(default=0)
dislike = models.PositiveIntegerField(default=0)
date = models.DateTimeField()
parent = models.ForeignKey(Comment, related_name='children')
def __unicode__(self):
return 'Reply for {}: user {}'.format(self.parent.id, self.name)
|
from .internal import _curry1, _curry2, _curry3, _arity
from .function import empty, curry_n, lift
from .internal import _equals, _get_arity, _fix_arity, _is_function
__all__ = ["all_pass", "any_pass", "and_", "lt", "gt", "both", "complement",
"if_else", "cond", "or_", "not_", "is_empty", "until", "when"]
@_curry1
def all_pass(preds):
fixed_preds = [_fix_arity(p) for p in preds]
return curry_n(max(map(_get_arity, preds)) if len(preds) else 1,
lambda *args: all(map(lambda p: p(*args), fixed_preds))
if len(preds) else True)
@_curry1
def any_pass(preds):
fixed_preds = [_fix_arity(p) for p in preds]
return curry_n(max(map(_get_arity, preds)) if len(preds) else 1,
lambda *args: any(map(lambda p: p(*args), fixed_preds))
if len(preds) else False)
@_curry2
def and_(a, b):
return a and b
@_curry2
def lt(a, b):
return a < b
@_curry2
def gt(a, b):
return a > b
@_curry2
def or_(a, b):
return a or b
@_curry1
def not_(a):
return not a
@_curry2
def both(f, g):
return lambda *args: f(*args) and g(*args) if _is_function(f) else \
lift(and_)(f, g)
complement = lift(not_)
@_curry3
def if_else(condition, on_true, on_false):
return curry_n(
max(map(_get_arity, [condition, on_true, on_false])),
lambda *args: _fix_arity(on_true)(*args)
if _fix_arity(condition)(*args) else _fix_arity(on_false)(*args))
@_curry1
def cond(pairs):
arity = max(map(lambda p: _get_arity(p[0]), pairs), default=0)
def do(*args):
for pred, fn in pairs:
if (pred(*args)):
return fn(*args)
return _arity(arity, do)
@_curry1
def is_empty(x):
return x is not None and _equals(x, empty(x))
@_curry3
def until(pred, fn, init):
val = init
while not pred(val):
val = fn(val)
return val
@_curry3
def when(pred, when_true_fn, x):
return when_true_fn(x) if pred(x) else x
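# Illustrative uses of the curried helpers defined above:
#   until(lambda x: x > 100, lambda x: x * 2, 1)           # -> 128
#   when(lambda x: x < 0, abs, -3)                         # -> 3
#   all_pass([lambda x: x > 0, lambda x: x % 2 == 0])(4)   # -> True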
|
from decimal import Decimal
from django.conf import settings
from django.utils.dateparse import parse_date
from mt940_writer import Account, Balance, Statement, Transaction, TransactionType
from . import MT940_STMT_LABEL
from .utils import (
retrieve_all_transactions, get_daily_file_uid, get_or_create_file,
reconcile_for_date, retrieve_last_balance, get_full_narrative
)
def get_bank_statement_file(api_session, receipt_date):
filepath = get_or_create_file(
MT940_STMT_LABEL,
receipt_date,
generate_bank_statement,
f_args=[api_session, receipt_date]
)
return open(filepath, 'rb')
def generate_bank_statement(api_session, receipt_date):
start_date, end_date = reconcile_for_date(api_session, receipt_date)
transactions = retrieve_all_transactions(
api_session,
received_at__gte=start_date,
received_at__lt=end_date
)
transaction_records = []
credit_num = 0
credit_total = 0
debit_num = 0
debit_total = 0
for transaction in transactions:
narrative = get_full_narrative(transaction)
amount = Decimal(transaction['amount']) / 100
if transaction['category'] == 'debit':
amount *= -1
debit_num += 1
debit_total += amount
else:
if transaction.get('ref_code'):
narrative = str(transaction['ref_code']) + ' BGC'
credit_num += 1
credit_total += amount
transaction_record = Transaction(
receipt_date,
amount,
TransactionType.miscellaneous,
narrative
)
transaction_records.append(transaction_record)
account = Account(settings.BANK_STMT_ACCOUNT_NUMBER, settings.BANK_STMT_SORT_CODE)
last_balance = retrieve_last_balance(api_session, receipt_date)
if last_balance:
opening_date = parse_date(last_balance['date']) or receipt_date
opening_amount = Decimal(last_balance['closing_balance']) / 100
else:
opening_date = receipt_date
opening_amount = 0
closing_amount = opening_amount + credit_total + debit_total
opening_balance = Balance(opening_amount, opening_date, settings.BANK_STMT_CURRENCY)
closing_balance = Balance(closing_amount, receipt_date, settings.BANK_STMT_CURRENCY)
statement = Statement(
get_daily_file_uid(), account, '1/1', opening_balance,
closing_balance, transaction_records
)
return str(statement)
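# Note on the arithmetic above: debit amounts are negated before being accumulated, so the
# closing balance is simply opening_amount + credit_total + debit_total; all amounts come
# from the API as integer minor units and are converted to Decimal by dividing by 100.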
|
# -*- coding: utf-8 -*-
"""
seasonedParser.__init__
Set module settings.
"""
from .__version__ import __version__
|
# version 2.0.0
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionXCosNet(nn.Module):
def __init__(self, conf):
super(AttentionXCosNet, self).__init__()
self.embedding_net = nn.Sequential(
nn.Conv2d(32, 16, 3, padding=1),
nn.BatchNorm2d(16),
nn.PReLU()
)
self.attention = nn.Sequential(
nn.Conv2d(32, 16, 3, padding=1),
nn.BatchNorm2d(16),
nn.PReLU(),
nn.Conv2d(16, 1, 3, padding=1),
nn.BatchNorm2d(1),
nn.PReLU(),
)
self.name = 'AttenCosNet'
self.USE_SOFTMAX = conf.USE_SOFTMAX
self.SOFTMAX_T = conf.SOFTMAX_T
def softmax(self, x, T=1):
x /= T
return F.softmax(x.reshape(x.size(0), x.size(1), -1), 2).view_as(x)
def divByNorm(self, x):
'''
attention_weights.size(): [bs, 1, 7, 6]
'''
x -= x.view(x.size(0), x.size(1), -1).min(dim=2)[0].repeat(1, 1, x.size(2) * x.size(3)).view(x.size(0), x.size(1), x.size(2), x.size(3))
x /= x.view(x.size(0), x.size(1), -1).sum(dim=2).repeat(1, 1, x.size(2) * x.size(3)).view(x.size(0), x.size(1), x.size(2), x.size(3))
return x
def forward(self, feat_grid_1, feat_grid_2):
'''
feat_grid_1.size(): [bs, 32, 7, 7]
attention_weights.size(): [bs, 1, 7, 7]
'''
# XXX Do I need to normalize grid_feat?
conv1 = self.embedding_net(feat_grid_1)
conv2 = self.embedding_net(feat_grid_2)
fused_feat = torch.cat((conv1, conv2), dim=1)
attention_weights = self.attention(fused_feat)
# Normalize the attention map: softmax over spatial positions, or min-shift and sum-normalize
if self.USE_SOFTMAX:
attention_weights = self.softmax(attention_weights, self.SOFTMAX_T)
else:
attention_weights = self.divByNorm(attention_weights)
return attention_weights
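# Minimal shape check for AttentionXCosNet (illustrative; `conf` only needs the two
# attributes read in __init__):
#   conf = type("Conf", (), {"USE_SOFTMAX": True, "SOFTMAX_T": 1.0})()
#   net = AttentionXCosNet(conf)
#   w = net(torch.randn(2, 32, 7, 7), torch.randn(2, 32, 7, 7))   # shape [2, 1, 7, 7]
#   w.view(2, -1).sum(dim=1)                                      # each attention map sums to ~1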
class AttentionCosNet(nn.Module):
def __init__(self):
super(AttentionCosNet, self).__init__()
self.embedding_net = nn.Sequential(
nn.Conv2d(512, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.PReLU()
)
self.attention = nn.Sequential(
nn.Conv2d(512, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.PReLU(),
nn.Conv2d(256, 1, 3, padding=1),
nn.BatchNorm2d(1),
nn.PReLU(),
)
self.name = 'AttentionCosNet'
def softmax(self, x):
return F.softmax(x.reshape(x.size(0), x.size(1), -1), 2).view_as(x)
def forward(self, x1, x2):
'''
x1.size(): [bs, 512, 7, 6]
attention_weights.size(): [bs, 1, 7, 6]
'''
conv1 = self.embedding_net(x1)
conv2 = self.embedding_net(x2)
fused_feat = torch.cat((conv1, conv2), dim=1)
attention_weights = self.attention(fused_feat)
# XXX: I use softmax instead of normalize
# attention_weights = F.normalize(attention_weights, p=2, dim=1)
attention_weights = self.softmax(attention_weights)
return x1, x2, attention_weights
class EmbeddingNet(nn.Module):
def __init__(self):
super(EmbeddingNet, self).__init__()
self.convnet = nn.Sequential(nn.Conv2d(1, 32, 5), nn.PReLU(),
nn.MaxPool2d(2, stride=2),
nn.Conv2d(32, 64, 5), nn.PReLU(),
nn.MaxPool2d(2, stride=2))
self.fc = nn.Sequential(nn.Linear(64 * 4 * 4, 256),
nn.PReLU(),
nn.Linear(256, 256),
nn.PReLU(),
nn.Linear(256, 2)
)
def forward(self, x):
output = self.convnet(x)
output = output.view(output.size()[0], -1)
output = self.fc(output)
return output
def get_embedding(self, x):
return self.forward(x)
class EmbeddingNetL2(EmbeddingNet):
def __init__(self):
super(EmbeddingNetL2, self).__init__()
def forward(self, x):
output = super(EmbeddingNetL2, self).forward(x)
output /= output.pow(2).sum(1, keepdim=True).sqrt()
return output
def get_embedding(self, x):
return self.forward(x)
class ClassificationNet(nn.Module):
def __init__(self, embedding_net, n_classes):
super(ClassificationNet, self).__init__()
self.embedding_net = embedding_net
self.n_classes = n_classes
self.nonlinear = nn.PReLU()
self.fc1 = nn.Linear(2, n_classes)
def forward(self, x):
output = self.embedding_net(x)
output = self.nonlinear(output)
scores = F.log_softmax(self.fc1(output), dim=-1)
return scores
def get_embedding(self, x):
return self.nonlinear(self.embedding_net(x))
class SiameseNet(nn.Module):
def __init__(self, embedding_net):
super(SiameseNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
class TripletNet(nn.Module):
def __init__(self, embedding_net):
super(TripletNet, self).__init__()
self.embedding_net = embedding_net
def forward(self, x1, x2, x3):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
output3 = self.embedding_net(x3)
return output1, output2, output3
def get_embedding(self, x):
return self.embedding_net(x)
class ENMSiameseNet(nn.Module):
def __init__(self, embedding_net):
super(ENMSiameseNet, self).__init__()
self.embedding_net = embedding_net
self.name = 'Siamese'
def forward(self, x1, x2):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
return output1, output2
def get_embedding(self, x):
return self.embedding_net(x)
class ENMTripletNet(nn.Module):
def __init__(self, embedding_net):
super(ENMTripletNet, self).__init__()
self.embedding_net = embedding_net
self.name = 'Triplet'
def forward(self, x1, x2, x3):
output1 = self.embedding_net(x1)
output2 = self.embedding_net(x2)
output3 = self.embedding_net(x3)
return output1, output2, output3
def get_embedding(self, x):
return self.embedding_net(x)
class ENMEmbeddingNet(nn.Module):
def __init__(self):
super(ENMEmbeddingNet, self).__init__()
self.fc = nn.Sequential(nn.Linear(1024, 1024),
nn.PReLU(),
nn.Dropout(p=0.5),
nn.Linear(1024, 1024),
nn.PReLU(),
nn.Dropout(p=0.5),
nn.Linear(1024, 1024)
)
self.name = 'ENMEmb'
def forward(self, x):
output = self.fc(x)
return output
def get_embedding(self, x):
return self.forward(x)
|
from unittest import TestCase
from piccolo.columns import ForeignKey, Varchar
from piccolo.columns.base import OnDelete, OnUpdate
from piccolo.table import Table
class Manager(Table):
name = Varchar()
class Band(Table):
"""
Contains a ForeignKey with non-default `on_delete` and `on_update` values.
"""
manager = ForeignKey(
references=Manager,
on_delete=OnDelete.set_null,
on_update=OnUpdate.set_null,
)
class TestForeignKeyMeta(TestCase):
"""
Make sure that `ForeignKeyMeta` is setup correctly.
"""
def test_foreignkeymeta(self):
self.assertTrue(
Band.manager._foreign_key_meta.on_update == OnUpdate.set_null
)
self.assertTrue(
Band.manager._foreign_key_meta.on_delete == OnDelete.set_null
)
self.assertTrue(Band.manager._foreign_key_meta.references == Manager)
|
import numpy as np
from sklearn.base import BaseEstimator
from keras_preprocessing.image import Iterator
from tensorflow.keras.utils import Sequence
from sklearn.utils.validation import indexable
class FastaIterator(Iterator, BaseEstimator, Sequence):
"""Base class for fasta sequence iterators.
Parameters
----------
n : int
Total number of samples
batch_size : int
Size of batch
shuffle : bool
Whether to shuffle data between epochs
seed : int
Random seed number for data shuffling
"""
white_list_formats = {'fasta', 'fa'}
def __init__(self, n, batch_size=32, shuffle=True, seed=0):
super(FastaIterator, self).__init__(n, batch_size, shuffle, seed)
class FastaToArrayIterator(FastaIterator):
"""Iterator yielding Numpy array from fasta sequences
Parameters
----------
X : array
Contains sequence indexes in the fasta file
generator : fitted object
instance of BatchGenerator, e.g., FastaDNABatchGenerator
or FastaProteinBatchGenerator
y : array
Target labels or values
batch_size : int, default=32
shuffle : bool, default=True
Whether to shuffle the data between epochs
sample_weight : None or array
Sample weight
seed : int
Random seed for data shuffling
"""
def __init__(self, X, generator, y=None, batch_size=32,
shuffle=True, sample_weight=None, seed=None):
X, y, sample_weight = indexable(X, y, sample_weight)
self.X = X
self.generator = generator
self.y = y
self.sample_weight = sample_weight
super(FastaToArrayIterator, self).__init__(
X.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
generator = self.generator
index_array = np.asarray(index_array)
n_samples = index_array.shape[0]
batch_x = np.zeros((n_samples,
generator.seq_length,
generator.n_bases), dtype='float32')
for i in np.arange(n_samples):
seq_idx = int(self.X[index_array[i], 0])
batch_x[i] = generator.apply_transform(seq_idx)
output = (batch_x,)
if self.y is None:
return output[0]
output += (self.y[index_array],)
if self.sample_weight is not None:
output += (self.sample_weight[index_array],)
return output
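# Yield structure of the batches above: batch_x alone when y is None, (batch_x, batch_y)
# when labels are given, and (batch_x, batch_y, batch_sample_weight) when sample weights
# are supplied; batch_x has shape (n_samples, generator.seq_length, generator.n_bases).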
|
import jpype
from jpype.types import *
import common
import collections.abc
class CollectionTestCase(common.JPypeTestCase):
def setUp(self):
super(CollectionTestCase, self).setUp()
def testCollection(self):
collection = jpype.java.util.ArrayList()
collection.add(1)
collection.add(2)
self.assertEqual([1, 2], [i for i in collection])
def testIterateHashmap(self):
collection = jpype.java.util.HashMap()
collection.put('A', 1)
collection.put('B', 2)
asdict = dict()
for x in collection.entrySet():
asdict[str(x.getKey())] = x.getValue().longValue()
self.assertEqual(asdict, {'A': 1, 'B': 2})
def testEnumMap(self):
enumclass = jpype.JClass('jpype.collection.TestEnum')
enummap = jpype.java.util.EnumMap(enumclass)
enummap.put(enumclass.A, 'ABC')
enummap.put(enumclass.B, 'DEF')
asdict = dict()
for x in enummap.entrySet():
asdict[str(x.getKey())] = x.getValue()
self.assertEqual({'A': 'ABC', 'B': 'DEF'}, asdict)
def testMapPut(self):
jmap = jpype.JClass("java.util.HashMap")()
jmap["a"] = 1
self.assertEqual(jmap["a"], 1)
def testMapPutAll(self):
jmap = jpype.JClass("java.util.HashMap")()
dic = {"a": "1", "b": "2", "c": "3"}
jmap.putAll(dic)
self.assertEqual(jmap["a"], "1")
self.assertEqual(jmap["b"], "2")
self.assertEqual(jmap["c"], "3")
with self.assertRaises(TypeError):
jmap.putAll([1, 2, 3])
def testListGet(self):
jlist = jpype.JClass("java.util.ArrayList")()
jlist.addAll([1, 2, 3, 4])
self.assertEqual(jlist[0], 1)
self.assertEqual(jlist[3], 4)
self.assertEqual(jlist[-1], 4)
self.assertEqual(jlist[-4], 1)
def testListSlice(self):
jlist = jpype.JClass("java.util.ArrayList")()
jlist.addAll([1, 2, 3, 4])
jlist[1:3] = [5, 6]
self.assertEqual(jlist[1], 5)
self.assertEqual(jlist[2], 6)
def testListDel(self):
jlist = jpype.JClass("java.util.ArrayList")()
jlist.addAll([1, 2, 3, 4])
del jlist[0]
self.assertEqual(len(jlist), 3)
self.assertEqual(jlist[0], 2)
def testCollectionAddAll(self):
l = [1, 2, 3, 4]
l2 = ['a', 'b']
jlist = jpype.JClass("java.util.ArrayList")()
jlist.addAll(l)
jcollection = jpype.JObject(jlist, jpype.java.util.Collection)
jcollection.addAll(l2)
l.extend(l2)
self.assertEqual(l, list(jcollection))
def testListSetItemNeg(self):
l = [1, 2, 3, 4]
jlist = jpype.JClass("java.util.ArrayList")()
jlist.addAll([1, 2, 3, 4])
jlist[-1] = 5
l[-1] = 5
self.assertEqual(l, list(jlist))
jlist[-2] = 6
l[-2] = 6
self.assertEqual(l, list(jlist))
with self.assertRaises(IndexError):
jlist[-5] = 6
def testMapKeyError(self):
hm = JClass('java.util.HashMap')()
with self.assertRaises(KeyError):
hm['foo']
hm['foo'] = None
self.assertEqual(hm['foo'], None)
def testHashMapEntryIter(self):
hm = JClass('java.util.HashMap')()
hm['alice'] = 'alice'
hm['betty'] = 'betty'
hm['catty'] = 'catty'
for p, v in hm.entrySet():
self.assertEqual(p, v)
def testTreeMapEntryIter(self):
hm = JClass('java.util.TreeMap')()
hm['alice'] = 'alice'
hm['betty'] = 'betty'
hm['catty'] = 'catty'
for p, v in hm.entrySet():
self.assertEqual(p, v)
def testSetDelItem(self):
hs = JClass('java.util.HashSet')()
hs.add('a')
hs.add('b')
hs.add('c')
self.assertIn('a', hs)
del hs['a']
self.assertNotIn('a', hs)
def testMapEntry(self):
hm = JClass('java.util.TreeMap')()
hm['alice'] = 'alice'
h = hm.entrySet()
self.assertEqual(len(h.iterator().next()), 2)
def testListIter(self):
ls = JClass('java.util.ArrayList')([0, 1, 2, 3])
for i, j in enumerate(ls):
self.assertEqual(i, j)
def testEnumeration(self):
st = JClass('java.util.StringTokenizer')("this is a test")
out = []
for i in st:
out.append(str(i))
self.assertEqual(len(i), 4)
self.assertEqual(" ".join(out), "this is a test")
def testCollectionDelItem(self):
ja = JClass('java.util.ArrayList')(['1', '2', '3'])
jc = JObject(ja, 'java.util.Collection')
with self.assertRaisesRegex(TypeError, 'remove'):
del jc[1]
def testHashMapCtor(self):
HashMap = JClass('java.util.HashMap')
dc = dict()
dc['fred'] = 1
dc['george'] = 2
dc['paul'] = 3
hm = HashMap(dc)
for p, v in dc.items():
self.assertEqual(hm[p], v)
def testHashMapPutAll(self):
HashMap = JClass('java.util.HashMap')
hm = HashMap()
dc = dict()
dc['fred'] = 1
dc['george'] = 2
dc['paul'] = 3
hm.putAll(dc)
for p, v in dc.items():
self.assertEqual(hm[p], v)
def testHashMapConvert(self):
HashMap = JClass('java.util.HashMap')
hm = HashMap()
hm['fred'] = 1
hm['george'] = 2
hm['paul'] = 3
dc = dict(hm)
for p, v in hm.items():
self.assertEqual(dc[p], v)
def testMapABC(self):
from collections.abc import Mapping, Sized, Iterable, Container
hm = JClass('java.util.HashMap')()
self.assertIsInstance(hm, Sized)
self.assertIsInstance(hm, Iterable)
self.assertIsInstance(hm, Container)
self.assertIsInstance(hm, Mapping)
def testUnmodifiableNext(self):
ArrayList = JClass('java.util.ArrayList')
Collections = JClass('java.util.Collections')
a = ArrayList()
a.add("first")
a.add("second")
a.add("third")
for i in a:
pass
for i in Collections.unmodifiableList(a):
pass
@common.requirePythonAfter((3, 6, 0))
def testListABC(self):
l = ['a', 'b', 'c', 'b']
JList = jpype.JClass('java.util.ArrayList')
al = JList(l)
for i, j in zip(reversed(al), reversed(l)):
self.assertEqual(i, j)
self.assertEqual(object() in al, object() in l)
self.assertEqual('a' in al, 'a' in l)
self.assertEqual(al.index('b'), l.index('b'))
self.assertEqual(al.count('b'), l.count('b'))
with self.assertRaises(ValueError):
al.index(object())
self.assertEqual(al.count(object()), l.count(object()))
self.assertIsInstance(al, collections.abc.Sequence)
self.assertIsInstance(al, collections.abc.Reversible)
self.assertIsInstance(al, collections.abc.Collection)
self.assertIsInstance(al, collections.abc.Iterable)
self.assertIsInstance(al, collections.abc.Sized)
@common.requirePythonAfter((3, 6, 0))
def testCollectionABC(self):
JCollection = jpype.JClass('java.util.Collection')
self.assertFalse(issubclass(JCollection, collections.abc.Sequence))
self.assertFalse(issubclass(JCollection, collections.abc.Reversible))
self.assertTrue(issubclass(JCollection, collections.abc.Collection))
self.assertTrue(issubclass(JCollection, collections.abc.Iterable))
self.assertTrue(issubclass(JCollection, collections.abc.Sized))
|
import math
import xml.etree.ElementTree as ET
from optparse import OptionParser
svg_prefix = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="210mm"
height="297mm"
viewBox="0 0 744.09448819 1052.3622047"
id="svg4154"
version="1.1"
inkscape:version="0.91 r13725"
sodipodi:docname="blank.svg">
<defs
id="defs4156" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="0.35"
inkscape:cx="-467.86"
inkscape:cy="405.71"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:window-width="1920"
inkscape:window-height="1017"
inkscape:window-x="-8"
inkscape:window-y="-8"
inkscape:window-maximized="1" />
<metadata
id="metadata4159">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1">
"""
svg_suffix = " </g>\n</svg>"
def brd_to_svg(boardpath, layer, PIXELS_PER_MM, PAD_SHRINK_MM):
if boardpath[-4:].lower() != ".brd": raise SystemExit("File must be a .BRD file")
print "\nProcessing " + layer + " layer of BRD file " + boardpath + "..."
layercode = 1
if layer == "bottom": layercode = 16
try:
#PARSE BRD AS XML TREE
tree = ET.parse(boardpath)
root = tree.getroot()
#BUILD DICT OF SMD PADS, BY LIBRARY AND PACKAGE NAMES
smd_dict = {}
for lib in root.findall('./drawing/board/libraries/library'):
libname = lib.get("name")
for pkg in lib.findall('./packages/package'):
pkgname = pkg.get('name')
smds = pkg.findall('smd')
if len(smds) > 0:
if libname not in smd_dict: smd_dict[libname] = {}
if pkgname not in smd_dict[libname]: smd_dict[libname][pkgname] = {}
for smd in smds:
(srot, smir, sspin) = parse_rot(smd.get('rot'))
smd_dict[libname][pkgname][smd.get('name')] = {"name":smd.get('name'), "x":float(smd.get('x')), "y":float(smd.get('y')), "dx":float(smd.get('dx')), "dy":float(smd.get('dy')), "rot":float(srot), "layer":int(smd.get('layer'))}
pad = smd_dict[libname][pkgname][smd.get('name')]
#FIND ALL ELEMENTS MATCHING SMD DICT ON LIBRARY AND PACKAGE NAMES
pads_dict = {}
svg_bbox = [[0,0],[0,0]]
for elem in root.findall('./drawing/board/elements/element'):
elib = elem.get('library')
epkg = elem.get('package')
ename = elem.get('name')
if(elib in smd_dict):
if(epkg in smd_dict[elib]):
ex = float(elem.get('x'))
ey = float(elem.get('y'))
(erot, emir, espin) = parse_rot(elem.get('rot'))
erot = float(erot) * math.pi/180.0
for smd in smd_dict[elib][epkg]:
smd_layer = 1 #presume layer = 1, emir = 0
pad = smd_dict[elib][epkg][smd]
if pad['layer'] == 1 and emir == 1: smd_layer = 16
elif pad['layer'] == 16 and emir == 0: smd_layer = 16
elif pad['layer'] == 16 and emir == 1: smd_layer = 1
if smd_layer == layercode:
px = float(pad['x'])
py = float(pad['y'])
dx = float(pad['dx']) - 2.0 * PAD_SHRINK_MM
dy = float(pad['dy']) - 2.0 * PAD_SHRINK_MM
rot = pad['rot'] * math.pi / 180.0
hx = dx/2.0
hy = dy/2.0
coords = [[hx,hy],[hx-dx,hy],[hx-dx,hy-dy],[hx,hy-dy]]
for c in coords:
cx = c[0] * math.cos(rot) - c[1] * math.sin(rot)
cy = c[0] * math.sin(rot) + c[1] * math.cos(rot)
#Position of pads in package in library, with package centroid at origin:
c[0] = cx + px
c[1] = cy + py
#Now rotate each point in coords by the element's rotation, and offset resulting coords by the element centroid
padsname = ename + "_" + pad['name']
for c in coords:
cx = c[0] * math.cos(erot) - c[1] * math.sin(erot)
cy = c[0] * math.sin(erot) + c[1] * math.cos(erot)
c[0] = cx + ex
c[1] = cy + ey
#convert mm to pixels and get full bounding box
pix_x = c[0] * PIXELS_PER_MM
pix_y = c[1] * PIXELS_PER_MM
if pix_x < svg_bbox[0][0]: svg_bbox[0][0] = pix_x
if pix_y < svg_bbox[0][1]: svg_bbox[0][1] = pix_y
if pix_x > svg_bbox[1][0]: svg_bbox[1][0] = pix_x
if pix_y > svg_bbox[1][1]: svg_bbox[1][1] = pix_y
                            #Store the placed corner points for each pad of each element (still in mm; converted to pixels when the SVG is written)
pads_dict[padsname] = coords
svg_infix = ""
        for k, v in pads_dict.items():
#build SVG polygon element tags, inverting y coordinates to match SVG coordinate system
coords_string = ""
for c in v:
coords_string += str(c[0] * PIXELS_PER_MM) + "," + str(svg_bbox[1][1] - c[1] * PIXELS_PER_MM) + " "
svg_infix += "<polygon points=\"" + coords_string[:-1] + "\" style=\"fill:black;stroke:black;stroke-width:0\" />\n"
svg_text = svg_prefix + svg_infix + svg_suffix
svg_out = open(boardpath[:-4] + "_" + layer + ".svg", "w")
svg_out.write(svg_text)
svg_out.close()
print "DONE"
except Exception as e: print str(e)
def parse_rot(rotstr):
rot = 0
mir = 0
spin = 0
if rotstr != None:
if "M" in rotstr: mir = 1
if "S" in rotstr: spin = 1
rot = ''.join(c for c in rotstr if c.isdigit())
return (rot, mir, spin)
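# Illustrative helper (hypothetical; not called by brd_to_svg): it restates the
# rotate-then-offset step applied above to each pad corner, first about the pad
# centre and then about the element centroid.
def _rotate_then_offset(point, angle_rad, offset):
    """Rotate an (x, y) point about the origin by angle_rad, then translate it by offset."""
    x, y = point
    ox, oy = offset
    rx = x * math.cos(angle_rad) - y * math.sin(angle_rad)
    ry = x * math.sin(angle_rad) + y * math.cos(angle_rad)
    return (rx + ox, ry + oy)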
def main():
parser = OptionParser()
parser.add_option("-f", "--file", dest="file", help="make svg of pads from BRD", metavar="BRD")
parser.add_option("-F", "--folder", dest="folder", help="make svg of pads from folder of BRDs", metavar="FLD")
parser.add_option("-l", "--layer", dest="layer", help="t=top, b=bottom", default="t")
parser.add_option("-p", "--pixels", action="store", type="int", dest="ppi", default=90, help="pixels per inch (90 or 96 for Inkscape)")
parser.add_option("-s", "--shrink", action="store", type="float", dest="shr", default=2, help="mils to shrink pad mask by (default 2)")
(options, args) = parser.parse_args()
ppm = 3.543307
if options.ppi != None: ppm = float(options.ppi) / 25.4
shrink = 0.0508
if options.shr != None: shrink = float(options.shr) * 0.0254
layer = "top"
if options.layer == "b": layer = "bottom"
f = options.file
F = options.folder
if f == None and F == None: raise SystemExit("\n*You must specify a folder or a file...")
elif f == None: raise SystemExit("\nFolder operations are not yet supported...")
elif F == None: brd_to_svg(f, layer, ppm, shrink)
else:
print "\N* File and folder arguments detected. Defaulting to file..."
brd_to_svg(f, layer, ppm, shrink)
if __name__ == '__main__':
main()
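# Example invocation (script and board file names are hypothetical):
#   python brd_to_svg.py -f myboard.brd -l t -p 96 -s 2
# would write myboard_top.svg next to the input, containing one black polygon per top-layer SMD pad.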
|
from Games.GameLogic import InARowGameSquareBoard
import numpy as np
from Games.GameLogic import bitboard
class MnkInARow(InARowGameSquareBoard):
default_kwargs = {
"rows": 10,
"columns": 10,
"in_a_row_to_win": 6
}
def __init__(self, **kwargs):
super().__init__()
self.kwargs = self.default_kwargs.copy()
self.kwargs.update(kwargs)
self.rows = self.kwargs.get("rows")
self.columns = self.kwargs.get("columns")
self.in_a_row_to_win = self.kwargs.get("in_a_row_to_win")
self.num_squares = self.columns * self.rows
self.board = np.zeros((self.num_squares,), dtype=int)
self.fv_size = self.num_squares * 2
self.num_actions = self.num_squares
self.kwargs = kwargs
self.__name__ = "MnkInARow" + str(self.rows) + "x" + str(self.columns) + "_" \
+ str(self.in_a_row_to_win)
if (self.rows, self.columns, self.in_a_row_to_win) == (3, 3, 3):
self.__name__ = "TicTacToe"
elif self.in_a_row_to_win == 6:
self.__name__ = "ConnectSix" + str(self.rows) + "x" + str(self.columns)
elif self.in_a_row_to_win == 5:
self.__name__ = "Gomoku" + str(self.rows) + "x" + str(self.columns)
def new(self):
return MnkInARow(**self.kwargs)
def copy(self):
board_copy = MnkInARow(**self.kwargs)
board_copy.board = self.board.copy()
board_copy.winner = self.winner
board_copy.turn = self.turn
return board_copy
def get_legal_moves(self):
""" Return a list of the possible action indexes """
if self.is_game_over():
return []
return np.where(self.board[:self.num_actions] == 0, 1, 0).nonzero()[0]
def advance(self, a):
if self.winner is not None:
raise Exception("Cannot advance when game is over")
if a is None:
raise Exception("action_index can not be None")
if self.board[a] != 0:
raise Exception("This column is full")
if a >= self.num_actions or a < 0:
raise Exception("Action is not legal")
board_value = self.player_index_to_board_value(player_index=self.turn)
self.board[a] = board_value
self.update_game_state()
def update_game_state(self):
self.update_in_a_row_game()
self.next_turn()
# Is the game a draw.
if self.is_draw():
self.winner = -1
def get_augmentations(self, s_array, pi_array, v_array):
return self.get_all_augmentations(s_array, pi_array, v_array)
def get_feature_vector(self):
return bitboard(self.board, self.player_index_to_board_value(self.turn))
def next_turn(self):
""" Next turn is always the other player in this game """
self.turn += 1
if self.turn >= self.num_players:
self.turn = 0
def display(self):
char_board = ""
for x in self.board:
if x == 0: char_board += '-'
if x == 1: char_board += 'x'
if x == 2: char_board += 'o'
print("*** Print of " + str(type(self).__name__) + " game ***")
        c = self.columns
        for r in range(self.rows):
            print(char_board[r*c:r*c + c])
print()
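# Minimal usage sketch (illustrative only). It assumes InARowGameSquareBoard from
# Games.GameLogic initialises `turn`, `winner` and `num_players` and provides
# is_game_over()/update_in_a_row_game()/player_index_to_board_value() as used above.
if __name__ == "__main__":
    game = MnkInARow(rows=3, columns=3, in_a_row_to_win=3)  # reported as "TicTacToe"
    while not game.is_game_over():
        # naive policy: always play the first legal square
        game.advance(game.get_legal_moves()[0])
    game.display()
    print("winner:", game.winner)  # player index, or -1 for a draw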
|
# coding=utf-8
# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for TAPEX."""
import json
import os
import random
from contextlib import contextmanager
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...file_utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import ENCODE_KWARGS_DOCSTRING, BatchEncoding, TextInput, TruncationStrategy
from ...utils import logging
if is_pandas_available():
import pandas as pd
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/tapex-base": "https://huggingface.co/microsoft/tapex-base/resolve/main/vocab.json",
},
"merges_file": {
"microsoft/tapex-base": "https://huggingface.co/microsoft/tapex-base/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/tapex-base": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/tapex-base": {"do_lower_case": True},
}
class TapexTruncationStrategy(ExplicitEnum):
"""
    Possible values for the `truncation` argument in [`~TapexTokenizer.__call__`]. Useful for tab-completion in an IDE.
"""
DROP_ROWS_TO_FIT = "drop_rows_to_fit"
class TokenizerStrategy(ExplicitEnum):
TOKENIZE_SOURCE = "tokenize_source"
TOKENIZE_TARGET = "tokenize_target"
TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
add_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to encode the sequences with the special tokens relative to their model.
padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str`, [`TapexTruncationStrategy`] or [`~tokenization_utils_base.TruncationStrategy`],
*optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will truncate
row by row, removing rows from the table.
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
to the maximum acceptable input length for the model if that argument is not provided. This will
truncate token by token, removing a token from the longest sequence in the pair if a pair of
sequences (or a batch of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
maximum acceptable input length for the model if that argument is not provided. This will only
truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to
`None`, this will use the predefined model maximum length if a maximum length is required by one of the
truncation/padding parameters. If the model has no specific maximum input length (like XLNet)
truncation/padding to a maximum length will be deactivated.
stride (`int`, *optional*, defaults to 0):
If set to a number along with `max_length`, the overflowing tokens returned when
`return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
returned to provide some overlap between truncated and overflowing sequences. The value of this
argument defines the number of overlapping tokens.
pad_to_multiple_of (`int`, *optional*):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
"""
@lru_cache()
def bytes_to_unicode():
"""
    Returns a mapping between utf-8 bytes and unicode strings. We specifically avoid mapping to whitespace/control
characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large #
of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset
you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe
vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
"""
bs = (
list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
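# For example (illustrative): printable bytes map to themselves, while byte 32 (space) is
# remapped to an unused printable code point, i.e. bytes_to_unicode()[ord("A")] == "A" and
# bytes_to_unicode()[ord(" ")] == "Ġ".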
def get_pairs(word):
"""
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length
strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
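# For example (illustrative): get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}.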
class IndexedRowTableLinearize:
"""
FORMAT: col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ...
"""
def process_table(self, table_content: Dict):
"""
Given a table, TableLinearize aims at converting it into a flatten sequence with special symbols.
"""
assert "header" in table_content and "rows" in table_content, self.PROMPT_MESSAGE
# process header
table_str = self.process_header(table_content["header"]) + " "
# process rows
for i, row_example in enumerate(table_content["rows"]):
# NOTE: the row should start from row 1 instead of 0
table_str += self.process_row(row_example, row_index=i + 1) + " "
return table_str.strip()
def process_header(self, headers: List):
"""
Given a list of headers, TableLinearize aims at converting it into a flatten sequence with special symbols.
"""
return "col : " + " | ".join(headers)
def process_row(self, row: List, row_index: int):
"""
Given a row, TableLinearize aims at converting it into a flatten sequence with special symbols.
"""
row_str = ""
row_cell_values = []
for cell_value in row:
if isinstance(cell_value, int):
row_cell_values.append(str(cell_value))
else:
row_cell_values.append(cell_value)
row_str += " | ".join(row_cell_values)
return "row " + str(row_index) + " : " + row_str
class TapexTokenizer(PreTrainedTokenizer):
r"""
Construct a TAPEX tokenizer. Based on byte-level Byte-Pair-Encoding (BPE).
This tokenizer can be used to flatten one or more table(s) and concatenate them with one or more related sentences
to be used by TAPEX models. The format that the TAPEX tokenizer creates is the following:
sentence col: col1 | col2 | col 3 row 1 : val1 | val2 | val3 row 2 : ...
The tokenizer supports a single table + single query, a single table and multiple queries (in which case the table
will be duplicated for every query), a single query and multiple tables (in which case the query will be duplicated
for every table), and multiple tables and queries. In other words, you can provide a batch of tables + questions to
the tokenizer for instance to prepare them for the model.
Tokenization itself is based on the BPE algorithm. It is identical to the one used by BART, RoBERTa and GPT-2.
This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
Path to the vocabulary file.
merges_file (`str`):
Path to the merges file.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the `cls_token`.
</Tip>
eos_token (`str`, *optional*, defaults to `"</s>"`):
The end of sequence token.
<Tip>
When building a sequence using special tokens, this is not the token that is used for the end of sequence.
The token used is the `sep_token`.
</Tip>
sep_token (`str`, *optional*, defaults to `"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (`str`, *optional*, defaults to `"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (`str`, *optional*, defaults to `"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (`str`, *optional*, defaults to `"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (`str`, *optional*, defaults to `"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows treating the leading word just as any
            other word. (The BART tokenizer detects the beginning of words by the preceding space.)
max_cell_length (`int`, *optional*, defaults to 15):
Maximum number of characters per cell when linearizing a table. If this number is exceeded, truncation
takes place.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
model_input_names = ["input_ids", "attention_mask"]
def __init__(
self,
vocab_file,
merges_file,
do_lower_case=True,
errors="replace",
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
add_prefix_space=False,
max_cell_length=15,
**kwargs
):
bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
super().__init__(
vocab_file=vocab_file,
merges_file=merges_file,
do_lower_case=do_lower_case,
errors=errors,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
max_cell_length=max_cell_length,
**kwargs,
)
with open(vocab_file, encoding="utf-8") as vocab_handle:
self.encoder = json.load(vocab_handle)
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with open(merges_file, encoding="utf-8") as merges_handle:
bpe_merges = merges_handle.read().split("\n")[1:-1]
bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.add_prefix_space = add_prefix_space
self.do_lower_case = do_lower_case
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
# additional properties
self.max_cell_length = max_cell_length
self.table_linearize = IndexedRowTableLinearize()
# property to decide using which call function
self.current_tokenizer = TokenizerStrategy.TOKENIZE_SOURCE
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A TAPEX sequence has the following format:
- single sequence: `<s> X </s>`
- pair of sequences: `<s> A </s></s> B </s>`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.
        Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
)
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. TAPEX does not
        make use of token type ids, therefore a list of zeros is returned.
        Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
@property
def vocab_size(self):
return len(self.encoder)
def get_vocab(self):
return dict(self.encoder, **self.added_tokens_encoder)
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
i = j
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def _tokenize(self, text):
"""Tokenize a string."""
bpe_tokens = []
for token in re.findall(self.pat, text):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")
) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
return bpe_tokens
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
return self.encoder.get(token, self.encoder.get(self.unk_token))
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.decoder.get(index)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (string) in a single string."""
text = "".join(tokens)
text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
return text
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
merge_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
)
with open(vocab_file, "w", encoding="utf-8") as f:
f.write(json.dumps(self.encoder, ensure_ascii=False))
index = 0
with open(merge_file, "w", encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!"
)
index = token_index
writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def __call__(
self,
table: Union["pd.DataFrame", List["pd.DataFrame"]] = None,
query: Optional[Union[TextInput, List[TextInput]]] = None,
answer: Union[str, List[str]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Main method to tokenize and prepare for the model one or several table-sequence pair(s).
Args:
table (`pd.DataFrame`, `List[pd.DataFrame]`):
Table(s) containing tabular data.
query (`str` or `List[str]`, *optional*):
Sentence or batch of sentences related to one or more table(s) to be encoded. Note that the number of
sentences must match the number of tables.
answer (`str` or `List[str]`, *optional*):
Optionally, the corresponding answer to the questions as supervision.
"""
if self.current_tokenizer == TokenizerStrategy.TOKENIZE_SOURCE:
if table is None:
raise ValueError("Please ensure that the table is not empty if you use TAPEX to encode source.")
return self.source_call_func(
table=table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
if answer is None:
raise ValueError("Please ensure that the answer is not empty if you use TAPEX to encode target.")
return self.target_call_func(
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def source_call_func(
self,
table: Union["pd.DataFrame", List["pd.DataFrame"]],
query: Optional[Union[TextInput, List[TextInput]]] = None,
answer: Union[str, List[str]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
# Input type checking for clearer error
valid_table = False
valid_query = False
# Check that table have a valid type
if isinstance(table, pd.DataFrame):
valid_table = True
elif isinstance(table, (list, tuple)) and isinstance(table[0], pd.DataFrame):
valid_table = True
# Check that query have a valid type
if query is None or isinstance(query, str):
valid_query = True
elif isinstance(query, (list, tuple)):
if len(query) == 0 or isinstance(query[0], str):
valid_query = True
if not valid_table:
raise ValueError(
"table input must of type `pd.DataFrame` (single example), `List[pd.DataFrame]` (batch of examples). "
)
if not valid_query:
raise ValueError("query input must of type `str` (single example), `List[str]` (batch of examples). ")
is_batched = isinstance(table, (list, tuple)) or isinstance(query, (list, tuple))
if is_batched:
return self.batch_encode_plus(
table=table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.encode_plus(
table=table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def batch_encode_plus(
self,
table: Union["pd.DataFrame", List["pd.DataFrame"]],
query: Optional[List[TextInput]] = None,
answer: List[str] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str] = False,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
<Tip warning={true}>
This method is deprecated, `__call__` should be used instead.
</Tip>
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._batch_encode_plus(
table=table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _batch_encode_plus(
self,
table: Union["pd.DataFrame", List["pd.DataFrame"]],
query: Optional[List[TextInput]] = None,
answer: Optional[List[str]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast."
)
if isinstance(table, pd.DataFrame) and isinstance(query, (list, tuple)):
# single table, many queries case
# duplicate table for every query
table = [table] * len(query)
if isinstance(table, (list, tuple)) and isinstance(query, str):
# many tables, single query case
# duplicate query for every table
query = [query] * len(table)
batch_outputs = self._batch_prepare_for_model(
table=table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=return_tensors,
verbose=verbose,
)
return BatchEncoding(batch_outputs)
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def _batch_prepare_for_model(
self,
table: Union["pd.DataFrame", List["pd.DataFrame"]],
query: Optional[Union[TextInput, List[TextInput]]] = None,
answer: Optional[Union[str, List[str]]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> BatchEncoding:
"""
This method adds special tokens, truncates sequences if overflowing while taking into account the special
tokens and manages a moving window (with user defined stride) for overflowing tokens.
"""
batch_outputs = {}
if answer is None:
answer = [None] * len(table)
for _table, _query, _answer in zip(table, query, answer):
text = self.prepare_table_query(
_table, _query, _answer, truncation_strategy=truncation_strategy, max_length=max_length
)
if self.do_lower_case:
text = text.lower()
tokens = self.tokenize(text)
outputs = self.prepare_for_model(
ids=self.convert_tokens_to_ids(tokens),
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterwards
return_attention_mask=False, # we pad in batch afterwards
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return batch_outputs
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
def encode(
self,
table: "pd.DataFrame",
query: Optional[TextInput] = None,
answer: Optional[str] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = False,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
"""
Prepare a table, a string and possible answer for the model. This method does not return token type IDs,
attention masks, etc. which are necessary for the model to work correctly. Use this method if you want to build
your processing on your own, otherwise refer to `__call__`.
"""
encoded_inputs = self.encode_plus(
table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=return_tensors,
**kwargs,
)
return encoded_inputs["input_ids"]
@add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPEX_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
def encode_plus(
self,
table: "pd.DataFrame",
query: Optional[TextInput] = None,
answer: Optional[str] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str] = False,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._encode_plus(
table=table,
query=query,
answer=answer,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _encode_plus(
self,
table: "pd.DataFrame",
query: Optional[TextInput] = None,
answer: Optional[str] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
text = self.prepare_table_query(
table, query, answer, truncation_strategy=truncation_strategy, max_length=max_length
)
# if necessary, perform lower case
if self.do_lower_case:
text = text.lower()
tokens = self.tokenize(text)
return self.prepare_for_model(
ids=self.convert_tokens_to_ids(tokens),
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
def target_call_func(
self,
answer: Union[str, List[str]],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
The method tokenizes and prepares the answer label for the model.
Args:
answer (`str` or `List[str]`):
Corresponding answer supervision to the queries for training the model.
"""
is_batched = isinstance(answer, (list, tuple))
if is_batched:
return self.target_batch_encode_plus(
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
else:
return self.target_encode_plus(
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def target_batch_encode_plus(
self,
answer: List[str],
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str] = False,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
Prepare answer strings for the model.
Args:
answer `List[str]`:
Corresponding answer supervision to the queries for training the model.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._target_batch_encode_plus(
answer=answer,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _target_batch_encode_plus(
self,
answer: List[str],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
batch_outputs = {}
for text in answer:
if self.do_lower_case:
text = text.lower()
tokens = self.tokenize(text)
outputs = self.prepare_for_model(
ids=self.convert_tokens_to_ids(tokens),
add_special_tokens=add_special_tokens,
padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=None, # we pad in batch afterwards
return_attention_mask=False, # we pad in batch afterwards
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
return_tensors=None, # We convert the whole batch to tensors at the end
prepend_batch_axis=False,
verbose=verbose,
)
for key, value in outputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
batch_outputs = self.pad(
batch_outputs,
padding=padding_strategy.value,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=return_attention_mask,
)
batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
return BatchEncoding(batch_outputs)
def target_encode(
self,
answer: str,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy, TapexTruncationStrategy] = False,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs
) -> List[int]:
"""
Prepare the answer string for the model. This method does not return token type IDs, attention masks, etc.
which are necessary for the model to work correctly. Use this method if you want to build your processing on
your own, otherwise refer to `__call__`.
Args:
answer `str`:
Corresponding answer supervision to the queries for training the model
"""
encoded_outputs = self.target_encode_plus(
answer=answer,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
return_tensors=return_tensors,
**kwargs,
)
return encoded_outputs["input_ids"]
def target_encode_plus(
self,
answer: str,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str] = False,
max_length: Optional[int] = None,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
"""
        Prepare an answer string for the model.
Args:
answer `str`:
Corresponding answer supervision to the queries for training the model.
"""
# Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
padding=padding,
truncation=truncation,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
verbose=verbose,
**kwargs,
)
return self._target_encode_plus(
answer=answer,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
def _target_encode_plus(
self,
answer: str,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if return_offsets_mapping:
raise NotImplementedError(
"return_offset_mapping is not available when using Python tokenizers. "
"To use this feature, change your tokenizer to one deriving from "
"transformers.PreTrainedTokenizerFast. "
"More information on available tokenizers at "
"https://github.com/huggingface/transformers/pull/2674"
)
text = answer
# if necessary, perform lower case
if self.do_lower_case:
text = text.lower()
tokens = self.tokenize(text)
return self.prepare_for_model(
ids=self.convert_tokens_to_ids(tokens),
add_special_tokens=add_special_tokens,
padding=padding_strategy.value,
truncation=truncation_strategy.value,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
prepend_batch_axis=True,
return_attention_mask=return_attention_mask,
return_token_type_ids=return_token_type_ids,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_length=return_length,
verbose=verbose,
)
@contextmanager
def as_target_tokenizer(self):
"""
Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to
sequence-to-sequence models that need a slightly different processing for the labels.
"""
self.current_tokenizer = TokenizerStrategy.TOKENIZE_TARGET
yield
# restore the call function
self.current_tokenizer = TokenizerStrategy.TOKENIZE_SOURCE
def prepare_table_query(
self,
table,
query,
answer=None,
        truncation_strategy: Union[str, TruncationStrategy, TapexTruncationStrategy] = TruncationStrategy.DO_NOT_TRUNCATE,
max_length=None,
):
"""
This method can be used to linearize a table and add a corresponding query.
Optionally, it also handles truncation of the table (cells).
An answer can be provided for more precise truncation.
"""
if not table.empty:
# step 1: create table dictionary
table_content = {"header": list(table.columns), "rows": [list(row.values) for i, row in table.iterrows()]}
# step 2: modify table internally
# always truncate table cells based on self.max_cell_length
# optionally truncate rows if truncation_strategy is set to it
self.truncate_table_cells(table_content, query, answer)
if truncation_strategy == TapexTruncationStrategy.DROP_ROWS_TO_FIT:
self.truncate_table_rows(table_content, query, answer, max_length=max_length)
# step 3: linearize table
linear_table = self.table_linearize.process_table(table_content)
else:
linear_table = ""
if linear_table == "":
logger.warning(
"You provide an empty table, or all cells contain much tokens (e.g., >= 1024 tokens). "
+ f"Please carefully check the corresponding table with the query : {query}."
)
if query == "":
logger.warning("You provide nothing to query with respect to the table.")
# step 4: concatenate query with linear_table
separator = " " if query and linear_table else ""
joint_input = (query + separator + linear_table) if query else linear_table
return joint_input
def truncate_table_cells(self, table_content: Dict, question: str, answer: List):
# TODO (Qian): is it possible to revert the original cell if it is in the final answer?
cell_mapping = {}
for row in table_content["rows"]:
for i, cell in enumerate(row):
truncate_cell = self.truncate_cell(cell)
if truncate_cell is not None:
cell_mapping[cell] = truncate_cell
row[i] = truncate_cell
# modify the answer list
if answer is not None:
for i, case in enumerate(answer):
if case in cell_mapping.keys():
answer[i] = cell_mapping[case]
def truncate_cell(self, cell_value):
# do not process on these cases
if isinstance(cell_value, int) or isinstance(cell_value, float):
return cell_value
if cell_value.strip() != "":
try_tokens = self.tokenize(cell_value)
if len(try_tokens) >= self.max_cell_length:
retain_tokens = try_tokens[: self.max_cell_length]
retain_cell_value = self.convert_tokens_to_string(retain_tokens)
return retain_cell_value
else:
return None
else:
return cell_value
def truncate_table_rows(
self, table_content: Dict, question: str, answer: Optional[Union[str, List[str]]] = None, max_length=None
):
"""
Args:
table_content:
{"header": xxx, "rows": xxx, "id" (Optionally): xxx}
question:
natural language sentence
answer:
                the answer used as supervision when training; otherwise it will be empty
"""
delete_ratio, remain_token_len = self.estimate_delete_ratio(table_content, question, max_length)
# randomly delete unrelated rows
self.delete_unrelated_rows(table_content, question, answer, delete_ratio)
# guarantee the result < max_length
maximum_keep_rows = 0
for ind, row_example in enumerate(table_content["rows"]):
value_string = self.table_linearize.process_row(row_example, ind + 1)
value_token_len = len(self.tokenize(value_string))
# over the size limit, and take action
if value_token_len > remain_token_len:
break
remain_token_len -= value_token_len
maximum_keep_rows += 1
del table_content["rows"][maximum_keep_rows:]
def estimate_delete_ratio(self, table_content: Dict, question: str, max_length=None):
if "header" not in table_content or "rows" not in table_content:
raise ValueError("The table content should contain both 'header' and 'rows' keys.")
        # calculate the tokens of the question; special tokens will only be prepended to the question
question_tokens = self.tokenize(question, add_special_tokens=True)
# calculate the tokens of header
header_string = self.table_linearize.process_header(table_content["header"])
header_tokens = self.tokenize(header_string, add_special_tokens=False)
# split all cell values into tokens and see how many can be accommodated
used_token_len = len(question_tokens) + len(header_tokens)
# remaining token space for rows
remain_token_len = max_length - used_token_len
value_string = ""
for _, row_example in enumerate(table_content["rows"]):
# use a general index to roughly estimate the overall token len
value_string += self.table_linearize.process_row(row_example, 100) + " "
value_token_len = len(self.tokenize(value_string))
if value_token_len < remain_token_len:
# no row will be deleted
return 0.0, remain_token_len
else:
# calculate a rough delete ratio
return 1.0 - remain_token_len / value_token_len, remain_token_len
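# Worked example (hypothetical numbers): with remain_token_len = 200 and value_token_len = 800,
# the delete ratio is 1.0 - 200 / 800 = 0.75, i.e. roughly three quarters of the rows become
# candidates for deletion.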
def delete_unrelated_rows(self, table_content: Dict, question: str, answer: List, delete_ratio: float):
"""
The argument answer is used only during training.
"""
truncated_unrelated_indices = []
related_indices = []
if answer is None or len(answer) == 0:
answer_set = set()
else:
answer_set = {ans_ex.lower() for ans_ex in answer}
# add question key words into answer set
if question is not None:
answer_set.update(question.split())
question_set = set(question.strip("?!.,").split(" "))
row_max_len = len(table_content["rows"])
for _row_idx, row in enumerate(table_content["rows"]):
lower_row = {str(cell).lower() for cell in row}
if len(lower_row & answer_set) == 0 and len(lower_row & question_set) == 0:
truncated_unrelated_indices.append(_row_idx)
else:
# add neighbours to preserve information aggressively
related_indices.extend([_row_idx - 2, _row_idx - 1, _row_idx, _row_idx + 1, _row_idx + 2])
# remove the neighbours
truncated_unrelated_indices = [
_row_idx for _row_idx in truncated_unrelated_indices if _row_idx not in related_indices
]
# select some cases to drop
drop_items = min(len(truncated_unrelated_indices), int(len(table_content["rows"]) * delete_ratio))
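# note: random.choices samples with replacement, so the number of distinct dropped rows can be smaller than drop_items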
drop_row_indices = random.choices(truncated_unrelated_indices, k=drop_items)
for _row_idx in reversed(range(row_max_len)):
if _row_idx in drop_row_indices:
del table_content["rows"][_row_idx]
# log a warning whenever any rows were actually dropped
if "id" in table_content and len(drop_row_indices) > 0:
logger.warning("Deleted {} rows from table {}".format(len(drop_row_indices), table_content["id"]))
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import tensorflow as tf
import sys
sys.path.append('..')
from models.run_net import SenseClsNet
from config import cfg
import cv2
import os
from tqdm import tqdm
import zipfile
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def accuracy(img_path, label_file, epoch):
forders = os.listdir(img_path)
labels_f = open(label_file, 'r')
labels = labels_f.readlines()
label_dict = dict()
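# each line of the label file is expected to look like '<name>:<alias>:<id>';
# keep the name as the key and the id as the value (fields 0 and 2 after splitting on ':')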
for l in labels:
key_value = l.strip().split(':')[0::2]
label_dict.update({key_value[0] : key_value[1]})
is_training = False
cfg.batch_size = 1
ckpt_dir = cfg.ckpt_path
correct = 0
wrong = 0
all_image = 0
configer = tf.ConfigProto()
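# limit this TensorFlow session to roughly 10% of GPU memory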
configer.gpu_options.per_process_gpu_memory_fraction = 0.1
with tf.Session(config=configer) as sess:
imgs_holder = tf.placeholder(tf.float32, shape=[1, 224, 224, 3])
model = SenseClsNet(imgs_holder, None, is_training)
classes, scores = model.predict()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
saver.restore(sess, ckpt_dir + 'senceCls-' + str(epoch))
sess.run(tf.local_variables_initializer())
for f in tqdm(forders):
label = float(label_dict[f])
imgs = os.listdir(os.path.join(img_path, f))
for img_name in imgs:
path = os.path.join(img_path, f, img_name)
img = cv2.imread(path)
image = cv2.resize(img, (224, 224))
# scale pixel values from [0, 255] into [0, 2)
img_data = image.astype(np.float32) / 255.0 * 2.0
all_image += 1
classes_index, scores_0 = sess.run([classes, scores], feed_dict={imgs_holder: np.reshape(img_data, [1, 224, 224, 3])})
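# predicted class indices are 0-based while the ids in the label file appear to be 1-based, hence the +1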
if classes_index[0] + 1 == label:
correct += 1
else:
wrong += 1
accuracy = float(correct) / float(correct + wrong)
# print('global_step: ', g_step)
print("All images:\n {}".format(int(correct + wrong)))
print("Accuracy: {:.4f}".format(accuracy))
tf.reset_default_graph()
if __name__ == '__main__':
img_path = '../data/rssrai_sense_cls/train'
label_file = '../data/rssrai_sense_cls/ClsName2id.txt'
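# evaluate checkpoints from epoch 12 down to epoch 2 (np.arange stops before the end value 1)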
epoch = np.arange(12, 1, -1)
print(epoch)
for i in epoch:
print('================{}================'.format(i))
accuracy(img_path, label_file, i)
|
""":mod:`autotweet.database` --- Database structure
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides methods to get session, get answer, etc.
"""
from __future__ import unicode_literals
from sqlalchemy import (Column, Float, ForeignKey, Integer, String, Table,
UniqueConstraint, create_engine)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, scoped_session, sessionmaker
from .logger_factory import get_logger
__all__ = ('Base', 'Document', 'Gram', 'get_session')
Base = declarative_base()
GRAM_LENGTH = 2
logger = get_logger('database')
def get_session(url):
"""Get db session.
:param url: URL for connect with DB
:type url: :class:`str`
:returns: A sqlalchemy db session
:rtype: :class:`sqlalchemy.orm.Session`
"""
engine = create_engine(url)
db_session = scoped_session(sessionmaker(engine))
Base.metadata.create_all(engine)
return db_session
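# Example usage (hypothetical SQLite URL):
#   session = get_session('sqlite:///autotweet.db')
#   session.add(Document('some question text', 'some answer text'))
#   session.commit()
# Many-to-many association between grams and the documents they occur in.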
association_table = Table(
'association', Base.metadata,
Column('gram_id', Integer, ForeignKey('grams.id')),
Column('document_id', Integer, ForeignKey('documents.id'))
)
class Document(Base):
__tablename__ = 'documents'
__table_args__ = (
UniqueConstraint('text', 'answer'),
)
id = Column(Integer, primary_key=True)
text = Column(String(140), nullable=False)
answer = Column(String(140), nullable=False)
grams = relationship(
'Gram', secondary=association_table, backref='documents')
def __init__(self, text, answer):
self.text = text
self.answer = answer
class Gram(Base):
__tablename__ = 'grams'
id = Column(Integer, primary_key=True)
gram = Column(String(GRAM_LENGTH), unique=True, nullable=False)
idf = Column(Float)
def __init__(self, gram):
self.gram = gram
|
# -*- coding: utf-8 -*-
"""
Plotting methods for graphing the distribution of measured quantities such as
reference monitor pollutant concentrations (``ref_distrib()``), meteorological
conditions including temperature and relative humidity (``met_distrib()``),
and the distribution of recording intervals (i.e., the time difference between
consecutive timestamps) in sensor datasets (``recording_interval_histogram()``).
================================================================================
@Author:
| Samuel Frederick, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Mon Jan 27 08:49:12 2020
Last Updated:
Wed Jul 28 14:20:18 2021
"""
import os
from pandas.plotting import register_matplotlib_converters
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sensortoolkit.param import Parameter
from sensortoolkit.datetime_utils import get_todays_date
register_matplotlib_converters()
sns.set_style('darkgrid')
def ref_distrib(ref_df, param=None, averaging_interval='1-hour',
font_size=18, write_to_file=True, figure_path=None,
filename_suffix=''):
"""Plot the distribution of reference values for the passed parameter.
Args:
ref_df (pandas DataFrame):
Dataframe containing reference data for the parameter ``'param'``
and logged at the specified ``'averaging_interval'`` .
param (str, optional):
The name of the parameter for which the distribution plot will show
the distribution of reference measurements. Defaults to None.
averaging_interval (str, optional):
The averaging interval for the passed dataframe. Defaults to
'1-hour'.
font_size (int or float, optional):
The font size for the figure. Defaults to 18.
write_to_file (bool, optional):
If true, the figure will be saved as a png image to the
``[project_path]/figures`` subdirectory. Defaults to True.
figure_path (str):
The full directory path to the folder where figures are saved.
This should be located at ``[project_path]/figures``.
filename_suffix (str, optional):
Optional suffix that can be added to the end of filenames to ensure
previously created files with similar naming are not overwritten.
Defaults to ''.
Returns:
None.
"""
try:
# Determine name of reference monitor from passed parameter name
try:
ref_name = ref_df[param + '_Method'].dropna().unique()[0]
except IndexError:
ref_name = 'Unspecified Reference'
# Format the parameter name for plotting
param_obj = Parameter(param)
param_name = param_obj.param_name
fmt_param = param_obj.param_format_name
fmt_param_units = param_obj.param_units
# Construct plot instance
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
sns.distplot(ref_df[ref_name].dropna(),
label=ref_name + ' ' + fmt_param, ax=ax)
# Set axes attributes
ax.set_xlabel(f'Reference {averaging_interval} {fmt_param} ({fmt_param_units})',
fontsize=font_size)
ax.set_ylabel('Relative Probability', fontsize=font_size)
ax.tick_params(axis='both', labelsize=0.75*font_size)
plt.legend(fontsize=0.85*font_size)
if write_to_file is True:
todays_date = get_todays_date()
figure_path = os.path.join(figure_path,
f'{ref_name}_DistPlot_{param_name}_{todays_date}')
if filename_suffix != '':
figure_path = figure_path + '_' + filename_suffix
figure_path += '.png'
plt.tight_layout()
plt.savefig(figure_path, dpi=300)
plt.close()
# Exception: Column name for reference monitor data not in passed df
except KeyError as i:
print(i, 'not found in passed reference dataframe')
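# Example usage (hypothetical dataframe and output directory):
#   ref_distrib(ref_df, param='PM25', averaging_interval='1-hour',
#               figure_path='/path/to/project/figures')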
def met_distrib(met_ref_data, avg_hrly_df, figure_path, sensor_name=None,
write_to_file=True):
"""Create distribution plots for meteorological parameters provided in the
passed met_ref_data dataframe.
Distributions are displayed as relative frequencies (i.e., percentages of
the total distribution of measurements).
Args:
met_ref_data (pandas DataFrame):
Meteorological reference data (1-hour averages) for temperature,
relative humidity, and dew point measurements.
avg_hrly_df (pandas DataFrame):
Dataframe containing the inter-sensor average value for 1-hour
averaged air sensor measurements.
figure_path (str):
The full directory path to the folder where figures are saved.
This should be located at ``[project_path]/figures``.
sensor_name (str, optional):
The name of the air sensor (make, manufacturer). Defaults to None.
write_to_file (bool, optional):
If true, the figure will be saved as a png image to the
``[project_path]/figures`` subdirectory. Defaults to True.
Returns:
None.
"""
font_size = 10
detail_font_size = 0.8*font_size
n_var = len(met_ref_data.count()) # Number of met variables to plot
fig, axs = plt.subplots(1, n_var, figsize=(5.15, 2.54))
fill_color = [['#77529A'], ['#b06c8b'], ['#588ded']]
plt.suptitle('Evaluation Site Meteorological Conditions\n',
fontsize=font_size)
fig.subplots_adjust(wspace=.6,
hspace=.3,
left=.12,
right=.88,
top=.86,
bottom=.17)
for i in range(n_var):
sensor_data = False
param = met_ref_data.columns[i]
data = met_ref_data[param].dropna()
if data.empty:
print(f'..Met data empty for {param}, trying sensor measurements')
try:
data = avg_hrly_df['mean_' + param].dropna()
sensor_data = True
except KeyError:
print(f'..{param} not measured by sensor, unable to plot '
'distribution')
continue
if data.empty:
print(f'..no intersensor averaged {param} data, unable to plot '
'distribution')
continue
sns.histplot(data,
ax=axs[i],
bins=15,
stat='percent',
kde=True,
color=fill_color[i][0],
**{'alpha': 0.6})
if param.startswith('RH'):
label = 'Reference Relative Humidity (%)'
if sensor_data:
axs[i].set_title('*Sensor Measurements Shown*',
fontsize=detail_font_size, y=0.97)
axs[i].set_xlabel(label, fontsize=detail_font_size)
axs[i].xaxis.set_major_locator(plt.MultipleLocator(25))
if param.startswith('Temp'):
label = 'Reference Temperature ($\\degree$C)'
if sensor_data:
axs[i].set_title('*Sensor Measurements Shown*',
fontsize=detail_font_size, y=0.97)
axs[i].set_xlabel(label, fontsize=detail_font_size)
axs[i].xaxis.set_major_locator(plt.MultipleLocator(10))
if param.startswith('DP'):
label = 'Reference Dew Point ($\\degree$C)'
if sensor_data:
label = label.replace('Reference', 'Sensor')
axs[i].set_title('*Sensor Measurements Shown*',
fontsize=detail_font_size, y=0.97)
axs[i].set_xlabel(label, fontsize=detail_font_size)
axs[i].set_ylabel('Relative Probability (%)',
fontsize=detail_font_size)
axs[i].tick_params(axis='both', labelsize=detail_font_size)
if write_to_file is True:
todays_date = get_todays_date()
file_path = os.path.join(figure_path, 'Met',
f'{sensor_name}_met_distplot_report_fmt_{todays_date}')
plt.savefig(file_path + '.png', dpi=300)
plt.close()
def recording_interval_histogram(full_df_list, xlims=(-10, 120), bar_width=2,
bar_alpha=.4):
"""Plot indicating the uneven time delta in sensor data.
Graphs bar plot of Log(counts) vs. time delta between consecutive timestamp
entries.
Args:
full_df_list (list):
List of pandas DataFrames containing timeseries data at the original
recorded sampling frequency.
xlims (Two-element tuple, optional):
The x-axis limits (in seconds) for displaying the distribution of
consecutive intervals between recorded timestamps. Defaults to
(-10, 120).
bar_width (int or float, optional):
The width of bars displayed in the figure. Defaults to 2.
bar_alpha (float, optional):
The transparency of bars displayed in the figure. Defaults to .4.
Returns:
None.
"""
xmin, xmax = xlims
if len(full_df_list) == 3:
color_list = ['#1f77b4', '#d62728', '#9467bd'] # blue, red, purple
else:
color_list = ['#9F99C8', '#fb8072', '#80b1d3', '#8dd3c7',
'#ffffb3', '#FD9962', '#b3de69']
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
for color, df in zip(color_list, full_df_list):
idx_name = df.index.name
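# time difference between each timestamp and the one before it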
delta = (df.index[1:] - df.index[0:-1]).to_frame()
ax.bar(delta[idx_name].value_counts().index.total_seconds(),
np.log10(delta[idx_name].value_counts().values),
width=bar_width, alpha=bar_alpha, edgecolor='none',
color=color)
ax.set_xlim(xmin, xmax)
ax.set_xlabel(r'$\Delta t$ (seconds)')
ax.set_ylabel(r'Log$_{10}$(counts)')
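# Example usage (hypothetical list of timestamp-indexed sensor dataframes):
#   recording_interval_histogram([sensor_df_a, sensor_df_b], xlims=(-10, 120))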
|
import numpy as np
import cv2
from matplotlib import pyplot as plt
#Read image
img = cv2.imread('Photos/maimy.jpg',1)
#color vector
color = ('b','g','r')
#https://docs.opencv.org/master/d1/db7/tutorial_py_histogram_begins.html
#iterate channels to plot the histogram of each one
for i,col in enumerate(color):
histr = cv2.calcHist([img],[i],None,[256],[0,256])
plt.plot(histr,color = col)
plt.xlim([0,256])
plt.show()
#thresholds for each channel, chosen by analyzing the previous histograms
TB = 90
TG = 115
TR = 135
#Channels
CB = img[:,:,0]
CG = img[:,:,1]
CR = img[:,:,2]
#https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
#Thresholding with the blue channel CB
ret,thresh1 = cv2.threshold(CB,TB,255,cv2.THRESH_BINARY)
ret,thresh2 = cv2.threshold(CB,TB,255,cv2.THRESH_BINARY_INV)
ret,thresh3 = cv2.threshold(CB,TB,255,cv2.THRESH_TRUNC)
ret,thresh4 = cv2.threshold(CB,TB,255,cv2.THRESH_TOZERO)
ret,thresh5 = cv2.threshold(CB,TB,255,cv2.THRESH_TOZERO_INV)
titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
images = [CB, thresh1, thresh2, thresh3, thresh4, thresh5]
for i in range(6):
plt.subplot(2,3,i+1)
plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
# Adaptive Thresholding
img = cv2.medianBlur(CB, 5)
ret,th1 = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
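# adaptive thresholds compare each pixel with the mean (or Gaussian-weighted mean) of its 11x11 neighborhood minus the constant 2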
th2 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
th3 = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
titles = ['Original Image', 'Median Blur', 'Global Thresholding (v = 127)', 'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [CB, img, th1, th2, th3]
for i in range(5):
plt.subplot(2,3,i+1)
plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([])
plt.yticks([])
plt.show()
# Otsu’s Binarization
img = CB
# global thresholding
ret1,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
# Otsu's thresholding
ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
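# ret2 is the threshold value chosen automatically by Otsu's method (it minimizes the within-class variance of the two pixel groups)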
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(img,(5,5),0)
ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# plot all the images and their histograms
images = [img, 0, th1, img, 0, th2, blur, 0, th3]
titles = ['Original Noisy Image','Histogram','Global Thresholding (v=127)',
'Original Noisy Image','Histogram',"Otsu's Thresholding",
'Gaussian filtered Image','Histogram',"Otsu's Thresholding"]
for i in range(3):
plt.subplot(3,3,i*3+1)
plt.imshow(images[i*3],'gray')
plt.title(titles[i*3])
plt.xticks([])
plt.yticks([])
plt.subplot(3,3,i*3+2)
plt.hist(images[i*3].ravel(),256)
plt.title(titles[i*3+1])
plt.xticks([])
plt.yticks([])
plt.subplot(3,3,i*3+3)
plt.imshow(images[i*3+2],'gray')
plt.title(titles[i*3+2])
plt.xticks([])
plt.yticks([])
plt.show()
|