hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d3ffab12aba0bf57b658d29399fb177951fe65df | 754 | py | Python | trader/event.py | ptro-development/trader | 084a5c58351e8d7fb55d772ac030a91af940e4c6 | [
"MIT"
] | null | null | null | trader/event.py | ptro-development/trader | 084a5c58351e8d7fb55d772ac030a91af940e4c6 | [
"MIT"
] | null | null | null | trader/event.py | ptro-development/trader | 084a5c58351e8d7fb55d772ac030a91af940e4c6 | [
"MIT"
] | null | null | null | """
class Event:
def __init__(
self, relative_match_position, sample,
incomming_data, correlation, percentage):
self.data = {
"relative_match_position": relative_match_position,
"sample": sample,
"incoming_data": incomming_data,
"correlation": correlation,
"percentage": percentage,
}
def is_percentage_equal_or_bigger(self, percentage):
return percentage <= self.data["percentage"]
def __str__(self):
return str(self.data)
def get(self):
return self.data
"""
"""
def filter_events(events, percentage=45.0):
return [e for e in events
if e.is_percentage_equal_or_bigger(percentage) is True]
"""
| 26 | 67 | 0.611406 | """
class Event:
def __init__(
self, relative_match_position, sample,
incomming_data, correlation, percentage):
self.data = {
"relative_match_position": relative_match_position,
"sample": sample,
"incoming_data": incomming_data,
"correlation": correlation,
"percentage": percentage,
}
def is_percentage_equal_or_bigger(self, percentage):
return percentage <= self.data["percentage"]
def __str__(self):
return str(self.data)
def get(self):
return self.data
"""
"""
def filter_events(events, percentage=45.0):
return [e for e in events
if e.is_percentage_equal_or_bigger(percentage) is True]
"""
| 0 | 0 | 0 |
f437201abe6477d97df2c68ac13e61bd5aa73e1d | 4,938 | py | Python | app/modules/entity/user_entity.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | app/modules/entity/user_entity.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | app/modules/entity/user_entity.py | arxcdr/silverback | 212139cbc1a648d1f877d60f2d7c4d750eefc3da | [
"BSD-3-Clause"
] | null | null | null | """
User Entity Module
"""
# Standard Library
import datetime
# Third Party Library
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.aggregates import Count
from django.contrib.auth.hashers import make_password
| 30.294479 | 104 | 0.595383 | """
User Entity Module
"""
# Standard Library
import datetime
# Third Party Library
from django.utils import timezone
from django.contrib.auth.models import User
from django.db.models.aggregates import Count
from django.contrib.auth.hashers import make_password
class UserEntity():
def insert_one(self, user):
"""Insert a New User"""
if self.get_one_by_username(user["username"]) is not False:
return False
new_user = User()
if "username" in user:
new_user.username = user["username"]
if "first_name" in user:
new_user.first_name = user["first_name"]
if "last_name" in user:
new_user.last_name = user["last_name"]
if "email" in user:
new_user.email = user["email"]
if "password" in user:
new_user.password = make_password(user["password"])
if "is_staff" in user:
new_user.is_staff = user["is_staff"]
if "is_active" in user:
new_user.is_active = user["is_active"]
if "is_superuser" in user:
new_user.is_superuser = user["is_superuser"]
if "last_login" in user:
new_user.last_login = user["last_login"]
if "date_joined" in user:
new_user.date_joined = user["date_joined"]
new_user.save()
return False if new_user.pk is None else new_user
def update_one_by_id(self, id, user_data):
user = self.get_one_by_id(id)
if user is not False:
if "username" in user_data:
user.username = user_data["username"]
if "first_name" in user_data:
user.first_name = user_data["first_name"]
if "last_name" in user_data:
user.last_name = user_data["last_name"]
if "email" in user_data:
user.email = user_data["email"]
if "password" in user_data:
user.password = make_password(user_data["password"])
if "is_staff" in user_data:
user.is_staff = user_data["is_staff"]
if "is_active" in user_data:
user.is_active = user_data["is_active"]
if "is_superuser" in user_data:
user.is_superuser = user_data["is_superuser"]
if "last_login" in user_data:
user.last_login = user_data["last_login"]
if "date_joined" in user_data:
user.date_joined = user_data["date_joined"]
user.save()
return True
return False
def get_one_by_id(self, user_id):
try:
user = User.objects.get(id=user_id)
return False if user.pk is None else user
except Exception:
return False
def get_one_by_username(self, username):
"""Get User By Username"""
try:
user = User.objects.get(username=username)
return False if user.pk is None else user
except Exception:
return False
def get_one_by_email(self, email):
"""Get User By Email"""
try:
user = User.objects.get(email=email)
return False if user.pk is None else user
except Exception:
return False
def update_password_by_email(self, email, new_password):
"""Update Password by Email"""
user = self.get_one_by_email(email)
if user is not False:
user.password = make_password(new_password)
user.save()
return True
return False
def validate_password_by_user_id(self, user_id, password):
user = self.get_one_by_id(user_id)
if user is not False and user.check_password(password) is True:
return True
return False
def update_password_by_user_id(self, user_id, new_password):
user = self.get_one_by_id(user_id)
if user is not False:
user.password = make_password(new_password)
user.save()
return True
return False
def delete_one_by_id(self, id):
user = self.get_one_by_id(id)
if user is not False:
count, deleted = user.delete()
return True if count > 0 else False
return False
def count_all(self):
return User.objects.count()
def get_all(self, offset=None, limit=None):
if offset is None or limit is None:
return User.objects.order_by('-date_joined').get()
return User.objects.order_by('-date_joined')[offset:limit+offset]
def count_over_days(self, days=7):
last_x_days = timezone.now() - datetime.timedelta(days)
return User.objects.filter(
date_joined__gte=last_x_days
).extra({"day": "date(date_joined)"}).values("day").order_by('-day').annotate(count=Count("id"))
def truncate(self):
return User.objects.all().delete()
| 2,494 | 2,156 | 23 |
b9fd87e03c1bbaed664491fb6559a588d9d995aa | 1,506 | py | Python | s3_utils.py | petersmittenaar/corona-calculator | e60c85e044436a1fcc60fbcfb9deb4ad6544df74 | [
"MIT"
] | null | null | null | s3_utils.py | petersmittenaar/corona-calculator | e60c85e044436a1fcc60fbcfb9deb4ad6544df74 | [
"MIT"
] | null | null | null | s3_utils.py | petersmittenaar/corona-calculator | e60c85e044436a1fcc60fbcfb9deb4ad6544df74 | [
"MIT"
] | null | null | null | import datetime
import os
from io import BytesIO
import boto3
from botocore.exceptions import ClientError
_S3_ACCESS_KEY = os.environ["AWSAccessKeyId"].replace("\r", "")
_S3_SECRET_KEY = os.environ["AWSSecretKey"].replace("\r", "")
_S3_BUCKET_NAME = "coronavirus-calculator-data"
DATESTRING_FORMAT_READABLE = "%A %d %B %Y, %H:%M %Z" # 'Sunday 30 November 2014'
def upload_file(data: bytes, object_name: str):
"""
Upload a file to an S3 bucket
:param data: Bytes to upload.
:param object_name: S3 object name.
:return: True if file was uploaded, else False
"""
buf = BytesIO(data)
s3_client = _configure_client()
try:
response = s3_client.put_object(
Body=buf, Bucket=_S3_BUCKET_NAME, Key=object_name
)
except ClientError as e:
print(e)
return False
return True
def download_file(object_name: str):
"""
Download a file from S3 bucket.
:param object_name: Name of object to download.
:return: Object bytes and date last modified.
"""
s3_client = _configure_client()
download = s3_client.get_object(Key=object_name, Bucket=_S3_BUCKET_NAME)
content = download["Body"].read()
last_modified = download["LastModified"].strftime(DATESTRING_FORMAT_READABLE)
return content, last_modified
| 28.415094 | 84 | 0.694555 | import datetime
import os
from io import BytesIO
import boto3
from botocore.exceptions import ClientError
_S3_ACCESS_KEY = os.environ["AWSAccessKeyId"].replace("\r", "")
_S3_SECRET_KEY = os.environ["AWSSecretKey"].replace("\r", "")
_S3_BUCKET_NAME = "coronavirus-calculator-data"
DATESTRING_FORMAT_READABLE = "%A %d %B %Y, %H:%M %Z" # 'Sunday 30 November 2014'
def _configure_client():
# Upload the file
s3_client = boto3.client(
"s3", aws_access_key_id=_S3_ACCESS_KEY, aws_secret_access_key=_S3_SECRET_KEY
)
return s3_client
def upload_file(data: bytes, object_name: str):
"""
Upload a file to an S3 bucket
:param data: Bytes to upload.
:param object_name: S3 object name.
:return: True if file was uploaded, else False
"""
buf = BytesIO(data)
s3_client = _configure_client()
try:
response = s3_client.put_object(
Body=buf, Bucket=_S3_BUCKET_NAME, Key=object_name
)
except ClientError as e:
print(e)
return False
return True
def download_file(object_name: str):
"""
Download a file from S3 bucket.
:param object_name: Name of object to download.
:return: Object bytes and date last modified.
"""
s3_client = _configure_client()
download = s3_client.get_object(Key=object_name, Bucket=_S3_BUCKET_NAME)
content = download["Body"].read()
last_modified = download["LastModified"].strftime(DATESTRING_FORMAT_READABLE)
return content, last_modified
| 167 | 0 | 23 |
36d5aa375ae3000a959d60eb531cd547c8b37d59 | 3,870 | py | Python | VQA_blocks.py | Neerajj9/Stacked-Attention-Networks-for-Visual-Question-Answring- | 9dfc318ded8e29a5a4a712140c0dd472b7793db6 | [
"MIT"
] | 10 | 2019-10-12T16:22:52.000Z | 2022-01-01T06:33:53.000Z | VQA_blocks.py | Neerajj9/Stacked-Attention-Networks-for-Visual-Question-Answring- | 9dfc318ded8e29a5a4a712140c0dd472b7793db6 | [
"MIT"
] | 1 | 2019-12-27T16:37:21.000Z | 2020-04-17T15:36:30.000Z | VQA_blocks.py | Neerajj9/Stacked-Attention-Networks-for-Visual-Question-Answring- | 9dfc318ded8e29a5a4a712140c0dd472b7793db6 | [
"MIT"
] | 4 | 2019-12-18T13:32:47.000Z | 2021-02-24T03:12:34.000Z | #!/usr/bin/env python
# coding: utf-8
# In[12]:
import tensorflow as tf
import numpy as np
import keras
import pandas as pd
import os
import matplotlib.pyplot as plt
dropout_rate = 0.4
# In[2]:
# In[3]:
#image = tf.placeholder(tf.float32 , [batch_size,224,224,3])
#x = image_layer(image)
#print(x.shape)
# In[4]:
# In[5]:
#quest = tf.placeholder(tf.int32 , [batch_size ,q_len])
#temp = question_layer(512 , 256 , quest , batch_size)
#print(temp.shape)
# In[6]:
# In[7]:
#att = attention(x , temp , 512 , True)
# In[8]:
| 23.597561 | 109 | 0.610336 | #!/usr/bin/env python
# coding: utf-8
# In[12]:
import tensorflow as tf
import numpy as np
import keras
import pandas as pd
import os
import matplotlib.pyplot as plt
dropout_rate = 0.4
# In[2]:
def image_layer(input_tenor):
with tf.variable_scope("image"):
base_model = tf.keras.applications.VGG16(input_tensor=input_tenor, include_top=False,weights='imagenet')
base_model.trainable = False
x = base_model.layers[-2].output
x = tf.reshape(x , [-1,x.shape[2]*x.shape[1] , x.shape[3]])
x = tf.layers.dense(x,1024)
return x
# In[3]:
#image = tf.placeholder(tf.float32 , [batch_size,224,224,3])
#x = image_layer(image)
#print(x.shape)
# In[4]:
def question_layer(embed_size ,embed_len , num_units , q_len , quest , batch_size ):
rnn = tf.nn.rnn_cell
lstm1 = rnn.BasicLSTMCell(num_units)
lstm_drop1 = rnn.DropoutWrapper(lstm1, output_keep_prob = 1 - 0.8)
lstm2 = rnn.BasicLSTMCell(num_units)
lstm_drop2 = rnn.DropoutWrapper(lstm2, output_keep_prob = 1 - 0.8)
final = rnn.MultiRNNCell([lstm_drop1,lstm_drop2])
state = final.zero_state(batch_size, tf.float32)
loss = 0.0
with tf.variable_scope("embed" , reuse=False):
for i in range(q_len):
if i==0:
ques_emb_linear = tf.zeros([batch_size, embed_size])
else:
tf.get_variable_scope().reuse_variables()
ques_emb_linear = quest[:,i-1]
# LSTM based question model
ques_emb_drop = tf.nn.dropout(ques_emb_linear, 1-dropout_rate)
ques_emb = tf.tanh(ques_emb_drop)
output, state = final(ques_emb, state)
question_emb = tf.reshape(tf.transpose(state, [2, 1, 0, 3]), [batch_size, -1])
return question_emb
# In[5]:
#quest = tf.placeholder(tf.int32 , [batch_size ,q_len])
#temp = question_layer(512 , 256 , quest , batch_size)
#print(temp.shape)
# In[6]:
def attention(image_tensor , question_tensor , out_dim , dropout):
img = tf.nn.tanh(tf.layers.dense(image_tensor , out_dim))
ques = tf.nn.tanh(tf.layers.dense(question_tensor , out_dim))
ques = tf.expand_dims(ques , axis = -2)
IQ = tf.nn.tanh(img + ques)
if dropout:
IQ = tf.nn.dropout(IQ , 0.5)
temp = tf.layers.dense(IQ , 1)
temp = tf.reshape(temp , [-1,temp.shape[1]])
p = tf.nn.softmax(temp)
p_exp = tf.expand_dims(p , axis = -1)
att_layer = tf.reduce_sum(p_exp * image_tensor , axis = 1)
final_out = att_layer + question_tensor
return p , final_out
# In[7]:
#att = attention(x , temp , 512 , True)
# In[8]:
def SAN(img_h , img_w , q_len , embed_size , lstm_units , attention_dim , num_output , batch_size ):
image = tf.placeholder(tf.float32 , [batch_size,img_h,img_w,3])
quest = tf.placeholder(tf.float32 , [batch_size ,q_len , embed_size])
label = tf.placeholder(tf.int32, [batch_size,])
image_embed = image_layer(image)
ques_embed = question_layer(embed_size ,attention_dim , lstm_units , q_len , quest , batch_size )
att_l1 , att = attention( image_embed , ques_embed , attention_dim , True)
att_l2 , att = attention( image_embed , att , attention_dim , True)
att = tf.nn.dropout(att , dropout_rate)
att = tf.layers.dense(att , num_output)
print(att.shape , label.shape )
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels= label , logits=att)
loss = tf.reduce_mean(loss)
att = tf.nn.softmax(att)
print(att.shape)
attention_layers = [att_l1 , att_l2]
return loss , image , quest , label , attention_layers , att
| 3,231 | 0 | 92 |
36e53bc6cba83a02182daac0ce0b7271b29a6f65 | 1,600 | py | Python | dev/Main.py | betaros/traffic_sign | 6f5ef4afb7093c929cc2e94c7f72daebbd149b7e | [
"MIT"
] | null | null | null | dev/Main.py | betaros/traffic_sign | 6f5ef4afb7093c929cc2e94c7f72daebbd149b7e | [
"MIT"
] | null | null | null | dev/Main.py | betaros/traffic_sign | 6f5ef4afb7093c929cc2e94c7f72daebbd149b7e | [
"MIT"
] | null | null | null | """
This project trains an AI to detect german traffic signs and sends the recognized signs to ros
TODO:
- interpolate ROI from CSV to new dimensions
- integrate ROS platform
Authors: Jan Fuesting
Last edited: 10.09.2018
"""
import os
from Misc import Misc
from Recognition import Recognition
from Training import Training
# Conflict ROS Kinetic and OpenCV
# https://stackoverflow.com/questions/43019951/after-install-ros-kinetic-cannot-import-opencv
class Main:
"""
Main class
"""
def __init__(self):
"""
Initialization
"""
self.misc = Misc()
self.recognition = Recognition()
self.training = Training()
def run(self):
"""
This method controls program sequence
:return:
"""
# Initialize system
self.misc.logger.debug("Program started")
dataset_path = self.misc.project_root + "/dataset"
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
# Getting and manipulating datasets
# self.training.download_pos_files(images=True, haar=True)
# self.training.download_neg_files()
# self.training.download_face_recognition_haar()
# self.training.manipulate_image()
# self.training.generate_description_traffic()
# self.training.generate_description_airplanes()
# Get camera image and find traffic signs
# self.recognition.face_recognition()
self.recognition.get_camera_image()
self.misc.logger.debug("Program finished")
main = Main()
main.run()
| 26.229508 | 94 | 0.6625 | """
This project trains an AI to detect german traffic signs and sends the recognized signs to ros
TODO:
- interpolate ROI from CSV to new dimensions
- integrate ROS platform
Authors: Jan Fuesting
Last edited: 10.09.2018
"""
import os
from Misc import Misc
from Recognition import Recognition
from Training import Training
# Conflict ROS Kinetic and OpenCV
# https://stackoverflow.com/questions/43019951/after-install-ros-kinetic-cannot-import-opencv
class Main:
"""
Main class
"""
def __init__(self):
"""
Initialization
"""
self.misc = Misc()
self.recognition = Recognition()
self.training = Training()
def run(self):
"""
This method controls program sequence
:return:
"""
# Initialize system
self.misc.logger.debug("Program started")
dataset_path = self.misc.project_root + "/dataset"
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
# Getting and manipulating datasets
# self.training.download_pos_files(images=True, haar=True)
# self.training.download_neg_files()
# self.training.download_face_recognition_haar()
# self.training.manipulate_image()
# self.training.generate_description_traffic()
# self.training.generate_description_airplanes()
# Get camera image and find traffic signs
# self.recognition.face_recognition()
self.recognition.get_camera_image()
self.misc.logger.debug("Program finished")
main = Main()
main.run()
| 0 | 0 | 0 |
52924c6d045dcc6d77514350466a12a4efd68223 | 377 | py | Python | backend/app/api/api_v1/router/auth/schemas/role.py | PY-GZKY/fastapi-crawl-admin | 6535054994d11e3c31b4caeae65e8fa0f495d2b7 | [
"MIT"
] | 13 | 2021-07-25T15:26:04.000Z | 2022-03-02T12:12:02.000Z | backend/app/api/api_v1/router/auth/schemas/role.py | PY-GZKY/fastapi-crawl-admin | 6535054994d11e3c31b4caeae65e8fa0f495d2b7 | [
"MIT"
] | 1 | 2021-07-26T03:26:09.000Z | 2021-07-26T09:05:38.000Z | backend/app/api/api_v1/router/auth/schemas/role.py | PY-GZKY/fastapi-crawl-admin | 6535054994d11e3c31b4caeae65e8fa0f495d2b7 | [
"MIT"
] | 3 | 2021-07-26T01:44:24.000Z | 2021-07-31T14:31:49.000Z |
# -*- coding: utf-8 -*-
"""
权限表
"""
from typing import Optional
from pydantic import BaseModel
class RoleCreate(BaseModel):
"""
创建角色字段
"""
role_id: int
role_name: str
permission_id: int
re_mark: Optional[str] = None
class RoleUpdate(BaseModel):
"""
角色更新字段
"""
role_name: Optional[str] = None
re_mark: Optional[str] = None
| 13.464286 | 35 | 0.61008 |
# -*- coding: utf-8 -*-
"""
权限表
"""
from typing import Optional
from pydantic import BaseModel
class RoleCreate(BaseModel):
"""
创建角色字段
"""
role_id: int
role_name: str
permission_id: int
re_mark: Optional[str] = None
class RoleUpdate(BaseModel):
"""
角色更新字段
"""
role_name: Optional[str] = None
re_mark: Optional[str] = None
| 0 | 0 | 0 |
69a9c95696514076bf4eb1b286dd7e5182875bdd | 596 | py | Python | scripts/cscap/plotids/add_plotid_cols.py | akrherz/datateam | 2efbaa24ff2e28115eeabce9193c3d3b152068d8 | [
"MIT"
] | 5 | 2017-05-20T04:51:55.000Z | 2022-03-07T18:55:27.000Z | scripts/cscap/plotids/add_plotid_cols.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 275 | 2017-03-09T20:31:30.000Z | 2022-03-30T22:43:47.000Z | scripts/cscap/plotids/add_plotid_cols.py | isudatateam/datateam | eb8e1dad6c05cb1b236689862fe87c56b25ea6fc | [
"MIT"
] | 3 | 2020-06-01T15:03:06.000Z | 2021-02-01T13:46:58.000Z | import pyiem.cscap_utils as util
drive = util.get_driveclient(util.get_config(), "cscap")
spr_client = util.get_spreadsheet_client(util.get_config())
res = drive.files().list(q="title contains 'Plot Identifiers'").execute()
for item in res["items"]:
if item["mimeType"] != "application/vnd.google-apps.spreadsheet":
continue
print(item["title"])
spreadsheet = util.Spreadsheet(spr_client, item["id"])
spreadsheet.get_worksheets()
sheet = spreadsheet.worksheets["Sheet 1"]
for col in ["AGRO", "SOIL", "GHG", "IPM_CSCAP", "IPM_USB"]:
sheet.add_column(col)
| 37.25 | 73 | 0.694631 | import pyiem.cscap_utils as util
drive = util.get_driveclient(util.get_config(), "cscap")
spr_client = util.get_spreadsheet_client(util.get_config())
res = drive.files().list(q="title contains 'Plot Identifiers'").execute()
for item in res["items"]:
if item["mimeType"] != "application/vnd.google-apps.spreadsheet":
continue
print(item["title"])
spreadsheet = util.Spreadsheet(spr_client, item["id"])
spreadsheet.get_worksheets()
sheet = spreadsheet.worksheets["Sheet 1"]
for col in ["AGRO", "SOIL", "GHG", "IPM_CSCAP", "IPM_USB"]:
sheet.add_column(col)
| 0 | 0 | 0 |
69b00cf1c9ed498b25b9af30722f160d0fd2e217 | 104 | py | Python | airflow_kubernetes_job_operator/exceptions.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
] | 35 | 2020-02-10T16:55:41.000Z | 2022-03-18T01:25:00.000Z | airflow_kubernetes_job_operator/exceptions.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
] | 26 | 2020-02-10T05:36:44.000Z | 2022-03-02T18:44:47.000Z | airflow_kubernetes_job_operator/exceptions.py | Fahadsaadullahkhan/KubernetesJobOperator | d96f9498667f937503d1e45142060904674f823f | [
"MIT"
] | 8 | 2020-02-28T23:24:07.000Z | 2021-11-29T21:35:46.000Z | from airflow import AirflowException
| 17.333333 | 55 | 0.846154 | from airflow import AirflowException
class KubernetesJobOperatorException(AirflowException):
pass
| 0 | 43 | 23 |
b947ccae0e129557d13f2de03ced06680b1afbfc | 1,802 | py | Python | pandapower/test/loadflow/test_PTDF_LODF.py | jurasofish/pandapower | 18b2fafe7beb9a39cae104532822327960c2622b | [
"BSD-3-Clause"
] | 1 | 2020-10-06T14:04:21.000Z | 2020-10-06T14:04:21.000Z | pandapower/test/loadflow/test_PTDF_LODF.py | jurasofish/pandapower | 18b2fafe7beb9a39cae104532822327960c2622b | [
"BSD-3-Clause"
] | null | null | null | pandapower/test/loadflow/test_PTDF_LODF.py | jurasofish/pandapower | 18b2fafe7beb9a39cae104532822327960c2622b | [
"BSD-3-Clause"
] | 1 | 2020-04-09T08:04:10.000Z | 2020-04-09T08:04:10.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
import pandapower.networks as nw
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.makePTDF import makePTDF
from pandapower.pypower.makeLODF import makeLODF
from pandapower.test.loadflow.result_test_network_generator import result_test_network_generator_dcpp
from pandapower.test.toolbox import add_grid_connection, create_test_line, assert_net_equal
if __name__ == "__main__":
pytest.main([__file__, "-xs"])
| 34 | 101 | 0.686459 | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
import pandapower.networks as nw
from pandapower.pd2ppc import _pd2ppc
from pandapower.pypower.makePTDF import makePTDF
from pandapower.pypower.makeLODF import makeLODF
from pandapower.test.loadflow.result_test_network_generator import result_test_network_generator_dcpp
from pandapower.test.toolbox import add_grid_connection, create_test_line, assert_net_equal
def test_PTDF():
net = nw.case9()
pp.rundcpp(net)
_, ppci = _pd2ppc(net)
ptdf = makePTDF(ppci["baseMVA"], ppci["bus"], ppci["branch"],
using_sparse_solver=False)
_ = makePTDF(ppci["baseMVA"], ppci["bus"], ppci["branch"],
result_side=1, using_sparse_solver=False)
ptdf_sparse = makePTDF(ppci["baseMVA"], ppci["bus"], ppci["branch"],
using_sparse_solver=True)
if not np.allclose(ptdf, ptdf_sparse):
raise AssertionError("Sparse PTDF has differenct result against dense PTDF")
if not ptdf.shape == (ppci["bus"].shape[0], ppci["branch"].shape[0]):
raise AssertionError("PTDF has wrong dimension")
if not np.all(~np.isnan(ptdf)):
raise AssertionError("PTDF has NaN value")
def test_LODF():
net = nw.case9()
pp.rundcpp(net)
_, ppci = _pd2ppc(net)
ptdf = makePTDF(ppci["baseMVA"], ppci["bus"], ppci["branch"])
lodf = makeLODF(ppci["branch"], ptdf)
if not lodf.shape == (ppci["branch"].shape[0], ppci["branch"].shape[0]):
raise AssertionError("LODF has wrong dimension")
if __name__ == "__main__":
pytest.main([__file__, "-xs"])
| 1,078 | 0 | 46 |
6f1a9450b1a8af20cdfb42b59155bf16615fe896 | 11,140 | py | Python | py/lvmspec/qa/qa_prod.py | sdss/lvmspec | befd6991537c4947fdf63ca262937f2bb845148f | [
"BSD-3-Clause"
] | null | null | null | py/lvmspec/qa/qa_prod.py | sdss/lvmspec | befd6991537c4947fdf63ca262937f2bb845148f | [
"BSD-3-Clause"
] | null | null | null | py/lvmspec/qa/qa_prod.py | sdss/lvmspec | befd6991537c4947fdf63ca262937f2bb845148f | [
"BSD-3-Clause"
] | null | null | null | """ Class to organize QA for a full DESI production run
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import glob, os
import warnings
from lvmspec.io import get_exposures
from lvmspec.io import get_files
from lvmspec.io import read_frame
from lvmspec.io import read_meta_frame
from lvmspec.io import specprod_root
from lvmutil.log import get_logger
# log = get_logger()
| 43.515625 | 144 | 0.525583 | """ Class to organize QA for a full DESI production run
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import glob, os
import warnings
from lvmspec.io import get_exposures
from lvmspec.io import get_files
from lvmspec.io import read_frame
from lvmspec.io import read_meta_frame
from lvmspec.io import specprod_root
from lvmutil.log import get_logger
# log = get_logger()
class QA_Prod(object):
def __init__(self, specprod_dir=None):
""" Class to organize and execute QA for a DESI production
Args:
specprod_dir(str): Path containing the exposures/ directory to use. If the value
is None, then the value of :func:`specprod_root` is used instead.
Notes:
Attributes:
qa_exps : list
List of QA_Exposure classes, one per exposure in production
data : dict
"""
if specprod_dir is None:
specprod_dir = specprod_root()
self.specprod_dir = specprod_dir
tmp = specprod_dir.split('/')
self.prod_name = tmp[-1] if (len(tmp[-1]) > 0) else tmp[-2]
self.qa_exps = []
#
self.data = {}
def get_qa_table(self, qatype, metric, nights='all', channels='all'):
""" Generate a table of QA values from .data
Args:
qatype: str
FIBERFLAT, SKYSUB
metric: str
nights: str or list of str, optional
channels: str or list of str, optional
'b', 'r', 'z'
Returns:
qa_tbl: Table
"""
from astropy.table import Table
out_list = []
out_expid = []
out_expmeta = []
out_cameras = []
# Nights
for night in self.data:
if (night not in nights) and (nights != 'all'):
continue
# Exposures
for expid in self.data[night]:
# Cameras
exp_meta = self.data[night][expid]['meta']
for camera in self.data[night][expid]:
if camera in ['flavor', 'meta']:
continue
if (camera[0] not in channels) and (channels != 'all'):
continue
# Grab
try:
val = self.data[night][expid][camera][qatype]['METRICS'][metric]
except KeyError: # Each exposure has limited qatype
pass
except TypeError:
import pdb; pdb.set_trace()
else:
if isinstance(val, (list,tuple)):
out_list.append(val[0])
else:
out_list.append(val)
# Meta data
out_expid.append(expid)
out_cameras.append(camera)
out_expmeta.append(exp_meta)
# Return Table
qa_tbl = Table()
qa_tbl[metric] = out_list
qa_tbl['EXPID'] = out_expid
qa_tbl['CAMERA'] = out_cameras
# Add expmeta
for key in out_expmeta[0].keys():
tmp_list = []
for exp_meta in out_expmeta:
tmp_list.append(exp_meta[key])
qa_tbl[key] = tmp_list
return qa_tbl
def load_data(self, inroot=None):
""" Load QA data from disk
"""
from lvmspec.io.qa import load_qa_prod
#
if inroot is None:
inroot = self.specprod_dir+'/QA/'+self.prod_name+'_qa'
self.data = load_qa_prod(inroot)
def make_frameqa(self, make_plots=False, clobber=True):
""" Work through the Production and make QA for all frames
Parameters:
make_plots: bool, optional
Remake the plots too?
clobber: bool, optional
Returns:
"""
# imports
from lvmspec.io import meta
from lvmspec.io.qa import load_qa_frame, write_qa_frame
from lvmspec.io.fiberflat import read_fiberflat
from lvmspec.io.sky import read_sky
from lvmspec.io.fluxcalibration import read_flux_calibration
from lvmspec.qa import qa_plots
from lvmspec.qa.qa_frame import qaframe_from_frame
from lvmspec.io.fluxcalibration import read_stdstar_models
log = get_logger()
# Loop on nights
path_nights = glob.glob(self.specprod_dir+'/exposures/*')
nights = [ipathn[ipathn.rfind('/')+1:] for ipathn in path_nights]
for night in nights:
for exposure in get_exposures(night, specprod_dir = self.specprod_dir):
# Object only??
frames_dict = get_files(filetype = str('frame'), night = night,
expid = exposure, specprod_dir = self.specprod_dir)
for camera,frame_fil in frames_dict.items():
# Load frame
qaframe_from_frame(frame_fil, make_plots=make_plots)
'''
frame_meta = read_meta_frame(frame_fil) # Only meta to speed it up
spectro = int(frame_meta['CAMERA'][-1])
if frame_meta['FLAVOR'] in ['flat','arc']:
qatype = 'qa_calib'
else:
qatype = 'qa_data'
qafile = meta.findfile(qatype, night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
if (not clobber) & os.path.isfile(qafile):
log.info("qafile={:s} exists. Not over-writing. Consider clobber=True".format(qafile))
continue
else: # Now the full read
frame = read_frame(frame_fil)
# Load
try:
qaframe = load_qa_frame(qafile, frame, flavor=frame.meta['FLAVOR'])
except AttributeError:
import pdb; pdb.set_trace
# Flat QA
if frame.meta['FLAVOR'] in ['flat']:
fiberflat_fil = meta.findfile('fiberflat', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
fiberflat = read_fiberflat(fiberflat_fil)
qaframe.run_qa('FIBERFLAT', (frame, fiberflat), clobber=clobber)
if make_plots:
# Do it
qafig = meta.findfile('qa_flat_fig', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
qa_plots.frame_fiberflat(qafig, qaframe, frame, fiberflat)
# SkySub QA
if qatype == 'qa_data':
sky_fil = meta.findfile('sky', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
try:
skymodel = read_sky(sky_fil)
except FileNotFoundError:
warnings.warn("Sky file {:s} not found. Skipping..".format(sky_fil))
else:
qaframe.run_qa('SKYSUB', (frame, skymodel))
if make_plots:
qafig = meta.findfile('qa_sky_fig', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
qa_plots.frame_skyres(qafig, frame, skymodel, qaframe)
# FluxCalib QA
if qatype == 'qa_data':
# Standard stars
stdstar_fil = meta.findfile('stdstars', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir,
spectrograph=spectro)
#try:
# model_tuple=read_stdstar_models(stdstar_fil)
#except FileNotFoundError:
# warnings.warn("Standard star file {:s} not found. Skipping..".format(stdstar_fil))
#else:
flux_fil = meta.findfile('calib', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
try:
fluxcalib = read_flux_calibration(flux_fil)
except FileNotFoundError:
warnings.warn("Flux file {:s} not found. Skipping..".format(flux_fil))
else:
qaframe.run_qa('FLUXCALIB', (frame, fluxcalib)) #, model_tuple))#, indiv_stars))
if make_plots:
qafig = meta.findfile('qa_flux_fig', night=night, camera=camera, expid=exposure, specprod_dir=self.specprod_dir)
qa_plots.frame_fluxcalib(qafig, qaframe, frame, fluxcalib)#, model_tuple)
# Write
write_qa_frame(qafile, qaframe)
'''
    def slurp(self, make_frameqa=False, remove=True, **kwargs):
        """Collect all per-exposure QA files into one production-level QA file.

        Scans every night directory under ``{specprod_dir}/exposures``, builds
        a ``QA_Exposure`` for each exposure that has frame files, rebuilds
        ``self.qa_exps`` from them, and writes the combined product to
        ``{specprod_dir}/QA/{prod_name}_qa``.

        Args:
            make_frameqa: bool, optional
              Regenerate the individual QA files (at the frame level) first.
            remove: bool, optional
              Forwarded to ``QA_Exposure``; presumably removes the individual
              QA files once slurped — confirm in QA_Exposure.
            **kwargs:
              Forwarded to :meth:`make_frameqa` when ``make_frameqa`` is True.

        Returns:
            None.  ``self.qa_exps`` is rebuilt as a side effect and the
            combined QA product is written to disk.
        """
        from lvmspec.qa import QA_Exposure
        from lvmspec.io import write_qa_prod
        log = get_logger()
        # Optionally regenerate the frame-level QA files before slurping
        if make_frameqa:
            self.make_frameqa(**kwargs)
        # Night names are the last path component of each exposures/* entry
        path_nights = glob.glob(self.specprod_dir+'/exposures/*')
        nights = [ipathn[ipathn.rfind('/')+1:] for ipathn in path_nights]
        # Start from a clean slate so repeated slurps do not accumulate
        log.info("Resetting qa_exps in qa_prod")
        self.qa_exps = []
        # Loop
        for night in nights:
            # Loop on exposures
            for exposure in get_exposures(night, specprod_dir = self.specprod_dir):
                frames_dict = get_files(filetype = str('frame'), night = night,
                                        expid = exposure, specprod_dir = self.specprod_dir)
                if len(frames_dict) == 0:
                    continue
                # Load any frame (for the type and meta info); all frames of an
                # exposure share the FLAVOR, so the first one suffices
                key = list(frames_dict.keys())[0]
                frame_fil = frames_dict[key]
                frame_meta = read_meta_frame(frame_fil)
                qa_exp = QA_Exposure(exposure, night, frame_meta['FLAVOR'],
                                     specprod_dir=self.specprod_dir, remove=remove)
                qa_exp.load_meta(frame_meta)
                # Append
                self.qa_exps.append(qa_exp)
        # Write the combined production QA product
        outroot = self.specprod_dir+'/QA/'+self.prod_name+'_qa'
        write_qa_prod(outroot, self)
def __repr__(self):
""" Print formatting
"""
return ('{:s}: specprod_dir={:s}'.format(self.__class__.__name__, self.specprod_dir))
| 0 | 10,699 | 23 |
c3e38e2aa1e3be0fc17ec59eea883af516eadc2e | 2,607 | py | Python | ai-mon-hoc/main.py | nguyenquanghieu2000d/ai-mon-hoc | 6026abd6157992004930f99b5fd6c9d8269c8d2d | [
"Apache-2.0"
] | null | null | null | ai-mon-hoc/main.py | nguyenquanghieu2000d/ai-mon-hoc | 6026abd6157992004930f99b5fd6c9d8269c8d2d | [
"Apache-2.0"
] | null | null | null | ai-mon-hoc/main.py | nguyenquanghieu2000d/ai-mon-hoc | 6026abd6157992004930f99b5fd6c9d8269c8d2d | [
"Apache-2.0"
] | null | null | null | import io
import os
import nest_asyncio
import uvicorn
# from PIL import Image
from dotenv import load_dotenv
from fastapi import FastAPI, UploadFile, File, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import base64
# from coreai.tichHop import Vgg16DetectFace
from rete import tinhluong
import numpy as np
load_dotenv()
# HOST = os.getenv("HOST")
app = FastAPI()
LIST_BANG_CAP = ["Cử nhân", "Thạc sĩ", "Tiến sĩ", "Phó giáo sư", "Giáo sư", "Khác"]
# vgg = Vgg16DetectFace()
# vgg.LoadModel()
# Browser origins allowed to call this API (CORS whitelist).
# NOTE(review): the tiangolo.com entries look like leftovers from the FastAPI
# tutorial — confirm they are intentional.
origins = [
    "http://localhost.tiangolo.com",
    "https://localhost.tiangolo.com",
    "http://localhost",
    "http://localhost:3000",
]
# Allow cross-origin requests from the origins above, with any method/header.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/")
@app.post("/tinhLuong/")
# def getAndDeCodeImage(data):
# file_bytes = np.asarray(bytearray(data), dtype=np.uint8)
# img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
# return img
# def stringToRGB(base64_string):
# imgdata = base64.b64decode(str(base64_string))
# image = Image.open(io.BytesIO(imgdata))
# return cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
# def encodeImage(image):
# retval, buffer = cv2.imencode('.jpg', image)
# jpg_as_text = base64.b64encode(buffer)
# return jpg_as_text
# @app.post("/deAndRecorg/")
# async def create_file(
# request: Request,
# data: Item2,
# ):
# img = stringToRGB(data.image.split(",")[1])
# data = vgg.predictFace(img)
# data['image'] = "data:image/jpg;base64," + encodeImage(data['image']).decode('utf-8')
# return data
# PORT = 8000
# ngrok_tunnel = ngrok.connect(PORT)
# print('Public URL:', ngrok_tunnel.public_url)
# nest_asyncio.apply()
# uvicorn.run(app, host=HOST, port=PORT)
# uvicorn.run(app, host=HOST, port=PORT)
# uvicorn.run(app, port=PORT)
| 22.282051 | 91 | 0.63598 | import io
import os
import nest_asyncio
import uvicorn
# from PIL import Image
from dotenv import load_dotenv
from fastapi import FastAPI, UploadFile, File, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import base64
# from coreai.tichHop import Vgg16DetectFace
from rete import tinhluong
import numpy as np
load_dotenv()
# HOST = os.getenv("HOST")
app = FastAPI()
LIST_BANG_CAP = ["Cử nhân", "Thạc sĩ", "Tiến sĩ", "Phó giáo sư", "Giáo sư", "Khác"]
# vgg = Vgg16DetectFace()
# vgg.LoadModel()
origins = [
"http://localhost.tiangolo.com",
"https://localhost.tiangolo.com",
"http://localhost",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class Item(BaseModel):
    """Request payload for POST /tinhLuong/ (salary calculation form)."""

    # Field meanings below are inferred from the Vietnamese names — confirm
    # against the client form.
    soBuoiDay: float      # number of teaching sessions
    phiGuiXe: float       # parking fee
    thuongThem: float     # additional bonus
    chucVu: str           # position / job title
    khoa: str             # faculty / department
    namKinhNghiem: int    # years of experience
    thoiGianLamViec: int  # working time
    bangCap: str          # degree; presumably one of LIST_BANG_CAP — verify
class Item2(BaseModel):
    """Payload for the (currently commented-out) face-detection endpoint."""

    image: str  # presumably a "data:image/...;base64,..." data URL — see the
                # commented /deAndRecorg/ handler below
@app.get("/")
def read_root():
    """Health-check endpoint returning a static greeting."""
    greeting = {"Hello": "World"}
    return greeting
@app.post("/tinhLuong/")
def read_item(item: Item):
    """Compute the salary for the submitted form and return it as JSON."""
    print(item)  # trace the incoming payload to stdout
    salary = tinhluong(
        item.phiGuiXe,
        item.thuongThem,
        item.chucVu,
        item.khoa,
        item.namKinhNghiem,
        item.thoiGianLamViec,
        item.bangCap,
        item.soBuoiDay,
    )
    return {"luong": salary}
# def getAndDeCodeImage(data):
# file_bytes = np.asarray(bytearray(data), dtype=np.uint8)
# img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
# return img
# def stringToRGB(base64_string):
# imgdata = base64.b64decode(str(base64_string))
# image = Image.open(io.BytesIO(imgdata))
# return cv2.cvtColor(np.array(image), cv2.COLOR_BGR2RGB)
# def encodeImage(image):
# retval, buffer = cv2.imencode('.jpg', image)
# jpg_as_text = base64.b64encode(buffer)
# return jpg_as_text
# @app.post("/deAndRecorg/")
# async def create_file(
# request: Request,
# data: Item2,
# ):
# img = stringToRGB(data.image.split(",")[1])
# data = vgg.predictFace(img)
# data['image'] = "data:image/jpg;base64," + encodeImage(data['image']).decode('utf-8')
# return data
# PORT = 8000
# ngrok_tunnel = ngrok.connect(PORT)
# print('Public URL:', ngrok_tunnel.public_url)
# nest_asyncio.apply()
# uvicorn.run(app, host=HOST, port=PORT)
# uvicorn.run(app, host=HOST, port=PORT)
# uvicorn.run(app, port=PORT)
| 407 | 176 | 90 |
9b6eb4da234bd40396496cc954eebc4bbe836868 | 664 | py | Python | sfeprapy/gui.py | PYSFE/SFEPraPy | c53cccddc10ff879dff759ae0d87e60c50a13461 | [
"MIT"
] | null | null | null | sfeprapy/gui.py | PYSFE/SFEPraPy | c53cccddc10ff879dff759ae0d87e60c50a13461 | [
"MIT"
] | null | null | null | sfeprapy/gui.py | PYSFE/SFEPraPy | c53cccddc10ff879dff759ae0d87e60c50a13461 | [
"MIT"
] | null | null | null | import tkinter as tk
if __name__ == '__main__':
master = tk.Tk()
dict_entries = {
'item 1': int,
'item 2': str,
}
master.title('Hello World!')
i = 0
dict_tk_entry = {}
for key, val in dict_entries.items():
tk.Label(master, text=str(key)).grid(row=i)
dict_tk_entry[key] = tk.Entry(master)
dict_tk_entry[key].grid(row=i, column=1)
i += 1
# tk.Label(master, text="First").grid(row=0)
# tk.Label(master, text="Second").grid(row=1)
# e1 = tk.Entry(master)
# e2 = tk.Entry(master)
#
# e1.grid(row=0, column=1)
# e2.grid(row=1, column=1)
master.mainloop() | 21.419355 | 51 | 0.555723 | import tkinter as tk
if __name__ == '__main__':
master = tk.Tk()
dict_entries = {
'item 1': int,
'item 2': str,
}
master.title('Hello World!')
i = 0
dict_tk_entry = {}
for key, val in dict_entries.items():
tk.Label(master, text=str(key)).grid(row=i)
dict_tk_entry[key] = tk.Entry(master)
dict_tk_entry[key].grid(row=i, column=1)
i += 1
# tk.Label(master, text="First").grid(row=0)
# tk.Label(master, text="Second").grid(row=1)
# e1 = tk.Entry(master)
# e2 = tk.Entry(master)
#
# e1.grid(row=0, column=1)
# e2.grid(row=1, column=1)
master.mainloop() | 0 | 0 | 0 |
71f1abc62366376729fedd7f9dfcdfbba0acd370 | 1,691 | py | Python | views/customer_data_report.py | Api2sem2021/5-ADS2020-2-equipe3 | 12e3882b6446976aad0ef12cd06ed8c29d87416d | [
"AFL-3.0"
] | 3 | 2021-09-19T22:01:23.000Z | 2021-12-11T14:35:20.000Z | views/customer_data_report.py | Api2sem2021/5-ADS2020-2-equipe3 | 12e3882b6446976aad0ef12cd06ed8c29d87416d | [
"AFL-3.0"
] | 17 | 2020-11-06T15:54:32.000Z | 2020-12-13T23:35:57.000Z | views/customer_data_report.py | Api2sem2021/5-ADS2020-2-equipe3 | 12e3882b6446976aad0ef12cd06ed8c29d87416d | [
"AFL-3.0"
] | 7 | 2020-11-09T14:01:20.000Z | 2021-12-11T14:35:10.000Z | #THIRD PARTY IMPORTS
from datetime import datetime
from views.utils import RequiredLoginViewMixin
from flask.globals import current_app, g
from flask.helpers import send_file
from flask.templating import render_template
from flask.views import MethodView
import os
import shutil
# LOCAL IMPORTS
| 31.90566 | 85 | 0.669426 | #THIRD PARTY IMPORTS
from datetime import datetime
from views.utils import RequiredLoginViewMixin
from flask.globals import current_app, g
from flask.helpers import send_file
from flask.templating import render_template
from flask.views import MethodView
import os
import shutil
# LOCAL IMPORTS
class CustomerDataReport(RequiredLoginViewMixin, MethodView):
    """Flask view that renders the logged-in customer's data to HTML templates
    and returns them as a downloadable zip archive (GDPR-style data export).
    """

    REPORT_NAME = 'Customer Data Report'
    TPL_BASE_DIR = 'report/customer_data/'
    TPL_CUSTOMER_PROFILE = 'customer_profile'
    TPL_PURCHASING_HISTORY = 'purchasing_history'
    TPL_INDEX = 'index'
    # Templates rendered into the report, in this order.
    TEMPLATES = [
        TPL_CUSTOMER_PROFILE,
        TPL_PURCHASING_HISTORY,
        TPL_INDEX,
    ]
    @property
    def archive_name(self):
        """Base name (no extension) for the zip archive.

        NOTE(review): ``datetime.now()`` embeds spaces and colons in the file
        name, which is not portable (e.g. Windows), and the archive is written
        relative to the process CWD — confirm this is intended.
        """
        return f'{self.REPORT_NAME}_{datetime.now()}'
    @property
    def path_report(self):
        """Per-customer output directory under the configured REPORT_PATH."""
        return f'{current_app.config["REPORT_PATH"]}/{g.user.customerid}'
    def path_template(self, template):
        """Full template path for *template*, using the app's template extension."""
        return self.TPL_BASE_DIR + template + current_app.config['APP_TEMPLATE_EXT']
    def post(self):
        """Build the report for the current user and stream the zip back."""
        report_name = self.build_customer_data_report()
        return send_file(report_name, mimetype='zip', as_attachment=True)
    def build_customer_data_report(self):
        """Render all templates, zip the output directory, return the zip path."""
        self.render_report(g.user, self.path_report)
        return shutil.make_archive(self.archive_name, 'zip', self.path_report)
    def render_report(self, customer, dir):
        """Render each template with *customer* into ``dir`` as ``<name>.html``."""
        os.makedirs(dir, exist_ok=True)
        for TPL in self.TEMPLATES:
            with open(f'{dir}/{TPL}.html', 'w') as f:
                f.write(render_template(self.path_template(TPL), user=customer))
50cd69ba8bbf8aa0a2cd7176b120a1da80366720 | 1,401 | py | Python | exam_preparation/february_14_2021/problem_3/problem_3.py | nmoskova/Python-advanced | 007f496e868aa151e39d79446b055e76ffb2db95 | [
"MIT"
] | null | null | null | exam_preparation/february_14_2021/problem_3/problem_3.py | nmoskova/Python-advanced | 007f496e868aa151e39d79446b055e76ffb2db95 | [
"MIT"
] | null | null | null | exam_preparation/february_14_2021/problem_3/problem_3.py | nmoskova/Python-advanced | 007f496e868aa151e39d79446b055e76ffb2db95 | [
"MIT"
] | null | null | null | from collections import deque
print(stock_availability(["choco", "vanilla", "banana"], "delivery", "caramel", "berry"))
print(stock_availability(["chocolate", "vanilla", "banana"], "delivery", "cookie","banana"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", 1, 1))
print(stock_availability(["chocolate", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["cookie", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", "cookie"))
| 31.840909 | 92 | 0.562455 | from collections import deque
def stock_availability(*args):
    """Apply one command to the stock list and return it.

    Expected ``args``: the boxes list, then an action ("delivery" or "sell"),
    then optional parameters (items to add, index thresholds, or flavours
    to remove).
    """
    boxes, action, *params = args
    if action == "delivery" and params:
        # Append every delivered item (mutates the caller's list).
        boxes.extend(params)
    elif action == "sell" and not params:
        # Sell the first box; with one or zero boxes nothing remains.
        boxes = boxes[1:] if len(boxes) > 1 else []
    elif action == "sell" and params:
        first = params[0]
        if isinstance(first, int):
            # Numeric parameters: each in-range value drops the first
            # `first` boxes; an out-of-range value clears the stock.
            for threshold in params:
                if threshold >= len(boxes):
                    boxes = []
                elif 0 <= threshold < len(boxes):
                    boxes = boxes[first:]
        else:
            # Flavour parameters: remove every occurrence of each flavour
            # (mutates the caller's list in place).
            for flavour in params:
                while flavour in boxes:
                    boxes.remove(flavour)
    return boxes
print(stock_availability(["choco", "vanilla", "banana"], "delivery", "caramel", "berry"))
print(stock_availability(["chocolate", "vanilla", "banana"], "delivery", "cookie","banana"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", 1, 1))
print(stock_availability(["chocolate", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["cookie", "chocolate", "banana"], "sell", "chocolate"))
print(stock_availability(["chocolate", "vanilla", "banana"], "sell", "cookie"))
| 768 | 0 | 23 |
43439ba05e28dc69307c584e475a2835beae20e1 | 82 | py | Python | fasthdl/__init__.py | davidbrochart/fasthdl | 75952ac031da088c3f70cb25348077747538269e | [
"MIT"
] | null | null | null | fasthdl/__init__.py | davidbrochart/fasthdl | 75952ac031da088c3f70cb25348077747538269e | [
"MIT"
] | null | null | null | fasthdl/__init__.py | davidbrochart/fasthdl | 75952ac031da088c3f70cb25348077747538269e | [
"MIT"
] | null | null | null | from .fasthdl import module, In, Out, Reg, Wire, X # noqa
__version__ = "0.0.1"
| 20.5 | 58 | 0.658537 | from .fasthdl import module, In, Out, Reg, Wire, X # noqa
__version__ = "0.0.1"
| 0 | 0 | 0 |
16d24b96bac7f02d49ab92778968adba661cdf59 | 2,518 | py | Python | vmail_manager/db.py | domrim/vmail-manager | 6ee4a8761d31cd9f35d3bf8f6ec08c049d9563ba | [
"MIT"
] | null | null | null | vmail_manager/db.py | domrim/vmail-manager | 6ee4a8761d31cd9f35d3bf8f6ec08c049d9563ba | [
"MIT"
] | null | null | null | vmail_manager/db.py | domrim/vmail-manager | 6ee4a8761d31cd9f35d3bf8f6ec08c049d9563ba | [
"MIT"
] | 1 | 2020-10-11T11:02:59.000Z | 2020-10-11T11:02:59.000Z | from urllib.parse import quote_plus
import click
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
def get_db_connection(config):
"""Configures db connection with config object.""" # TODO: URL-Parsing for special characters.
if config["DB"]["driver"].get() is None:
db_connector = f"{config['DB']['dialect'].get(str)}"
else:
db_connector = (
f"{config['DB']['dialect'].get(str)}+{config['DB']['driver'].get(str)}"
)
db = (
f"{db_connector}://{config['DB']['username'].get(str)}:{quote_plus(config['DB']['password'].get(str))}@"
f"{config['DB']['host'].get(str)}:{config['DB']['port'].get(int)}/{config['DB']['database'].get(str)}"
)
engine = create_engine(db)
try:
Base.metadata.create_all(bind=engine)
session = sessionmaker(bind=engine)
except OperationalError as e:
click.echo(f"Could not connect to '{db}'.\n" f"Details on error:\n" f"{e}")
raise click.Abort
connection = session()
return connection
| 32.701299 | 112 | 0.672756 | from urllib.parse import quote_plus
import click
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
def get_db_connection(config):
    """Build a SQLAlchemy session from the app configuration.

    ``config`` appears to be a confuse-style configuration view (values are
    read with ``.get(str)`` / ``.get(int)``) — confirm against the caller.
    Creates all declarative tables on first connect and returns a live
    session; echoes the error and aborts the CLI when the database is
    unreachable.
    """  # TODO: URL-Parsing for special characters beyond the password.
    # Dialect alone, or "dialect+driver" when a driver is configured.
    if config["DB"]["driver"].get() is None:
        db_connector = f"{config['DB']['dialect'].get(str)}"
    else:
        db_connector = (
            f"{config['DB']['dialect'].get(str)}+{config['DB']['driver'].get(str)}"
        )
    # Only the password is URL-quoted; other fields are used verbatim.
    db = (
        f"{db_connector}://{config['DB']['username'].get(str)}:{quote_plus(config['DB']['password'].get(str))}@"
        f"{config['DB']['host'].get(str)}:{config['DB']['port'].get(int)}/{config['DB']['database'].get(str)}"
    )
    engine = create_engine(db)
    try:
        # Ensure the schema exists before handing out sessions.
        Base.metadata.create_all(bind=engine)
        session = sessionmaker(bind=engine)
    except OperationalError as e:
        click.echo(f"Could not connect to '{db}'.\n" f"Details on error:\n" f"{e}")
        raise click.Abort
    connection = session()
    return connection
def try_commit(connection):
    """Commit the session; on any failure echo the error and abort the CLI."""
    try:
        connection.commit()
    except Exception as error:  # surface any DB failure to the user
        click.echo(error)
        raise click.Abort
class Domains(Base):
    """A mail domain managed by this vmail installation."""

    __tablename__ = "domains"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Fully-qualified domain name; referenced by accounts and aliases.
    domain = Column(String(255), nullable=False, unique=True)
class Accounts(Base):
    """A mailbox account (username@domain) with quota and state flags.

    NOTE(review): ``unique=True`` on both ``username`` and ``domain``
    individually forbids two accounts sharing a username on different
    domains (and two accounts on the same domain).  A composite unique
    constraint on (username, domain) looks more likely intended — confirm.
    """

    __tablename__ = "accounts"
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Local part of the address.
    username = Column(String(64), nullable=False, unique=True)
    # Domain part; must exist in the domains table.
    domain = Column(
        String(255), ForeignKey("domains.domain"), nullable=False, unique=True
    )
    # Stored password (hash format not visible here — confirm).
    password = Column(String(255), nullable=False)
    # Mailbox quota; 0 presumably means unlimited — confirm.
    quota = Column(Integer, default=0)
    # Account disabled by default until explicitly enabled.
    enabled = Column(Boolean, default=False)
    # Send-only accounts cannot receive mail.
    sendonly = Column(Boolean, default=False)
class Aliases(Base):
    """A forwarding rule from source address to destination address.

    NOTE(review): as with Accounts, the per-column ``unique=True`` flags
    prevent any two aliases from sharing a source username or domain —
    a composite constraint is probably intended; confirm.
    """

    __tablename__ = "aliases"
    id = Column(Integer, primary_key=True, autoincrement=True)
    # Local part of the aliased (source) address.
    source_username = Column(String(64), nullable=False, unique=True)
    # Source domain; must be a managed domain.
    source_domain = Column(
        String(255), ForeignKey("domains.domain"), nullable=False, unique=True
    )
    # Destination address; its domain is free-form (may be external).
    destination_username = Column(String(64), nullable=False, unique=True)
    destination_domain = Column(String(255), nullable=False, unique=True)
    # Alias disabled by default until explicitly enabled.
    enabled = Column(Boolean, default=False)
| 118 | 1,070 | 92 |
17f456c7f6ff0906832dc437efd95e15c6fdc4c9 | 189 | py | Python | yamlpath/__init__.py | gdubicki/yamlpath | 2b6816c2b427250ec58118c64ea09ee870c8f9bb | [
"ISC"
] | 52 | 2019-05-04T03:01:19.000Z | 2022-03-17T13:31:11.000Z | yamlpath/__init__.py | gdubicki/yamlpath | 2b6816c2b427250ec58118c64ea09ee870c8f9bb | [
"ISC"
] | 49 | 2019-06-06T05:07:10.000Z | 2022-03-25T07:18:48.000Z | yamlpath/__init__.py | gdubicki/yamlpath | 2b6816c2b427250ec58118c64ea09ee870c8f9bb | [
"ISC"
] | 8 | 2019-08-12T21:19:27.000Z | 2021-12-17T09:20:10.000Z | """Core YAML Path classes."""
# Establish the version number common to all components
__version__ = "3.6.3"
from yamlpath.yamlpath import YAMLPath
from yamlpath.processor import Processor
| 27 | 55 | 0.78836 | """Core YAML Path classes."""
# Establish the version number common to all components
__version__ = "3.6.3"
from yamlpath.yamlpath import YAMLPath
from yamlpath.processor import Processor
| 0 | 0 | 0 |
af424191c77f81f00be472660835e1271e2f1fec | 3,864 | py | Python | smtk/attribute/testing/python/copyDefinitionTest.py | yumin/SMTK | d280f10c5b70953b2a0196f71832955c7fc75e7f | [
"BSD-3-Clause-Clear"
] | null | null | null | smtk/attribute/testing/python/copyDefinitionTest.py | yumin/SMTK | d280f10c5b70953b2a0196f71832955c7fc75e7f | [
"BSD-3-Clause-Clear"
] | 4 | 2016-11-10T15:49:51.000Z | 2017-02-06T23:24:16.000Z | smtk/attribute/testing/python/copyDefinitionTest.py | yumin/SMTK | d280f10c5b70953b2a0196f71832955c7fc75e7f | [
"BSD-3-Clause-Clear"
] | null | null | null | #=============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#=============================================================================
"""
Test smtk.attribute.System.copyDefinition() method
Uses copyDefinitionTest.sbt in the SMTKTestData repo.
"""
import logging
import os
import sys
try:
import smtk
except ImportError:
print
print 'Not able to import smtk library. You might need to:'
print ' - Use the PYTHONPATH variable to point to the smtk python lib'
print ' - And/or use the LD_LIBRARY_PATH variable to point to the shiboken libraries'
print
sys.exit(-1)
logging.basicConfig(level=logging.DEBUG)
SBT_FILENAME = 'copyDefinitionTest.sbt'
SBI_FILENAME = 'copyDefinitionTest.sbi'
# ---------------------------------------------------------------------
if __name__ == '__main__':
    # First (and only) argument is the path to the SMTKTestData directory.
    # Python 2 script (bare print statements).
    if len(sys.argv) < 2:
        print
        print 'Test smtk.attribute.System.copyDefinition()'
        # NOTE(review): the Usage string contains %s but is never %-formatted
        # with sys.argv[0], so it prints the literal "%s".
        print 'Usage: python %s path-to-SMTKTestData'
        print
        sys.exit(-1)
    logging.debug('LD_LIBRARY_PATH = %s' % os.environ.get('LD_LIBRARY_PATH'))
    logging.debug('PYTHONPATH = %s' % os.environ.get('PYTHONPATH'))
    # Load attribute template file into the source system
    smtk_test_data = sys.argv[1]
    att_folder = os.path.join(smtk_test_data, 'smtk', 'attribute')
    att_path = os.path.join(att_folder, SBT_FILENAME)
    logging.info('Reading %s' % att_path)
    input_system = smtk.attribute.System()
    reader = smtk.io.AttributeReader()
    logger = smtk.io.Logger()
    err = reader.read(input_system, att_path, logger)
    if err:
        logging.error("Unable to load template file")
        logging.error(logger.convertToString())
        sys.exit(-2)
    err_count = 0
    # Instantiate 2nd (target) system for the copy
    test_system = smtk.attribute.System()
    # Copy SecondConcrete definition, which should pull in its whole
    # dependency chain (base classes and referenced definitions)
    source_def = input_system.findDefinition('SecondConcrete')
    test_system.copyDefinition(source_def, 0)
    # Definitions that must exist in the target system after the copy
    expected_types = [
        'SecondConcrete', 'AnotherAbstractBase', 'CommonBase',
        'FirstConcrete', 'PolyLinearFunction'
    ]
    for def_type in expected_types:
        defn = test_system.findDefinition(def_type)
        if defn is None:
            logging.error('Expected %s definition, found None' % def_type)
            err_count += 1
    # Explicit check that conditional children of the discrete string item
    # were copied along with the definition
    defn = test_system.findDefinition('SecondConcrete')
    if defn:
        i = defn.findItemPosition('ConditionalSelectionList')
        item = defn.itemDefinition(i)
        if item:
            string_item = smtk.attribute.to_concrete(item)
            list_one = string_item.conditionalItems('One')
            if len(list_one) != 1:
                msg = 'Expected \"One\" enum to have 1 conditional item, found %d' % \
                    len(list_one)
                logging.error(msg)
                err_count += 1
            list_two = string_item.conditionalItems('Two')
            if len(list_two) != 3:
                msg = 'Expected \"Two\" enum to have 3 conditional items, found %d' % \
                    len(list_two)
                logging.error(msg)
                err_count += 1
        else:
            logging.error('Did not find ConditionalSelectionList item')
            err_count += 1
    # Note there is ALOT more that could & should be verified here
    logging.debug('Writing system')
    # Write the copied system out for manual inspection
    writer = smtk.io.AttributeWriter()
    err = writer.write(test_system, SBI_FILENAME, logger)
    if err:
        logging.error("Unable to write output file")
        sys.exit(-3)
    logging.info('Wrote %s' % SBI_FILENAME)
    # Exit code = number of failed checks (0 on success)
    if err_count > 0:
        sys.exit(err_count)
    sys.exit(0)
| 30.912 | 90 | 0.656056 | #=============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
#=============================================================================
"""
Test smtk.attribute.System.copyDefinition() method
Uses copyDefinitionTest.sbt in the SMTKTestData repo.
"""
import logging
import os
import sys
try:
import smtk
except ImportError:
print
print 'Not able to import smtk library. You might need to:'
print ' - Use the PYTHONPATH variable to point to the smtk python lib'
print ' - And/or use the LD_LIBRARY_PATH variable to point to the shiboken libraries'
print
sys.exit(-1)
logging.basicConfig(level=logging.DEBUG)
SBT_FILENAME = 'copyDefinitionTest.sbt'
SBI_FILENAME = 'copyDefinitionTest.sbi'
# ---------------------------------------------------------------------
if __name__ == '__main__':
# First (and) only argument is the path to the smtk data directory
if len(sys.argv) < 2:
print
print 'Test smtk.attribute.System.copyDefinition()'
print 'Usage: python %s path-to-SMTKTestData'
print
sys.exit(-1)
logging.debug('LD_LIBRARY_PATH = %s' % os.environ.get('LD_LIBRARY_PATH'))
logging.debug('PYTHONPATH = %s' % os.environ.get('PYTHONPATH'))
# Load attribute file into system
smtk_test_data = sys.argv[1]
att_folder = os.path.join(smtk_test_data, 'smtk', 'attribute')
att_path = os.path.join(att_folder, SBT_FILENAME)
logging.info('Reading %s' % att_path)
input_system = smtk.attribute.System()
reader = smtk.io.AttributeReader()
logger = smtk.io.Logger()
err = reader.read(input_system, att_path, logger)
if err:
logging.error("Unable to load template file")
logging.error(logger.convertToString())
sys.exit(-2)
err_count = 0
# Instantiate 2nd system
test_system = smtk.attribute.System()
# Copy SecondConcrete definition, which should copy alot of stuff
source_def = input_system.findDefinition('SecondConcrete')
test_system.copyDefinition(source_def, 0)
expected_types = [
'SecondConcrete', 'AnotherAbstractBase', 'CommonBase',
'FirstConcrete', 'PolyLinearFunction'
]
for def_type in expected_types:
defn = test_system.findDefinition(def_type)
if defn is None:
logging.error('Expected %s definition, found None' % def_type)
err_count += 1
# Add explicit test for conditional children
defn = test_system.findDefinition('SecondConcrete')
if defn:
i = defn.findItemPosition('ConditionalSelectionList')
item = defn.itemDefinition(i)
if item:
string_item = smtk.attribute.to_concrete(item)
list_one = string_item.conditionalItems('One')
if len(list_one) != 1:
msg = 'Expected \"One\" enum to have 1 conditional item, found %d' % \
len(list_one)
logging.error(msg)
err_count += 1
list_two = string_item.conditionalItems('Two')
if len(list_two) != 3:
msg = 'Expected \"Two\" enum to have 3 conditional items, found %d' % \
len(list_two)
logging.error(msg)
err_count += 1
else:
logging.error('Did not find ConditionalSelectionList item')
err_count += 1
# Note there is ALOT more that could & should be verified here
logging.debug('Writing system')
# Write data out FYI
writer = smtk.io.AttributeWriter()
err = writer.write(test_system, SBI_FILENAME, logger)
if err:
logging.error("Unable to write output file")
sys.exit(-3)
logging.info('Wrote %s' % SBI_FILENAME)
# Check error count
if err_count > 0:
sys.exit(err_count)
sys.exit(0)
| 0 | 0 | 0 |
1476f3d6490ede2b5dcd94ceeda3da0b370d4a1b | 6,727 | py | Python | utils/preproccessing/preprocessor.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | 5 | 2020-09-30T16:58:59.000Z | 2021-11-30T22:34:10.000Z | utils/preproccessing/preprocessor.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | null | null | null | utils/preproccessing/preprocessor.py | Mouse-BB-Team/Bot-Detection | 4438d8ccec1baaa22f3357213e6d52a62ff6d618 | [
"MIT"
] | null | null | null | from math import floor
from random import shuffle
from pandas import DataFrame
import numpy as np
from matplotlib import pyplot
from PIL import Image
BOT_USER = 'usr-71'
TRAIN_SET_PERCENT = 0.75
SCALE_X = 299
SCALE_Y = 299
| 33.80402 | 115 | 0.646202 | from math import floor
from random import shuffle
from pandas import DataFrame
import numpy as np
from matplotlib import pyplot
from PIL import Image
BOT_USER = 'usr-71'
TRAIN_SET_PERCENT = 0.75
SCALE_X = 299
SCALE_Y = 299
class Preprocessor:
def __init__(self, dataset, dims=(SCALE_X, SCALE_Y), train_set_percent=TRAIN_SET_PERCENT, bot_user_id=BOT_USER,
bot_dataset_multiplier=1, print_pictures=False):
self.dataset = dataset
self.dims = dims
self.train_set_percent = train_set_percent
self.bot_user_id = bot_user_id
self.bot_set_multiplier = bot_dataset_multiplier
self.print_pictures = print_pictures
def save_data_as_images(self, path, sub_user_path, sub_bot_path):
user_set, bot_set = self.__extract_bot_dataset()
user_set = self.__generate_images(user_set)
bot_set = self.__generate_images(bot_set)
self._save_to_files(user_set, path, sub_user_path)
self._save_to_files(bot_set, path, sub_bot_path)
def get_datasets(self):
user_set, bot_set = self.__extract_bot_dataset()
training_user_set_size, training_bot_set_size = self.__count_dataset_sizes(user_set, bot_set)
training_user_set, validation_user_set = self.__split_sets(user_set, training_user_set_size)
training_bot_set, validation_bot_set = self.__split_sets(bot_set, training_bot_set_size)
training_bot_set = self.__multiply_bot_set(training_bot_set)
validation_bot_set = self.__multiply_bot_set(validation_bot_set)
training_dataset, training_labels = self.__make_dataset(training_user_set, training_bot_set)
validation_dataset, validation_labels = self.__make_dataset(validation_user_set, validation_bot_set)
training_labels = np.array(training_labels)
validation_labels = np.array(validation_labels)
training_dataset = self.__generate_images(training_dataset) / 255.0
validation_dataset = self.__generate_images(validation_dataset) / 255.0
return (training_dataset, training_labels), (validation_dataset, validation_labels)
@staticmethod
def _save_to_files(dataset, path, subpath):
for i in range(0, len(dataset)):
im = Image.fromarray(np.uint8(dataset[i]))
im.save(path + subpath + subpath + str(i) + '.jpg')
def __extract_bot_dataset(self):
bot_set = self.dataset[self.bot_user_id]
del self.dataset[self.bot_user_id]
user_set = self.__flat_dataset(self.dataset)
return user_set, bot_set
@staticmethod
def __flat_dataset(dataset: dict):
result_dataset = []
for key in dataset.keys():
result_dataset.extend(dataset[key])
return result_dataset
def __count_dataset_sizes(self, user_dataset, bot_dataset):
total_user_elements = self.__count_elements(user_dataset)
total_bot_elements = self.__count_elements(bot_dataset)
training_user_set_size = floor(TRAIN_SET_PERCENT * total_user_elements)
training_bot_set_size = floor(TRAIN_SET_PERCENT * total_bot_elements)
return training_user_set_size, training_bot_set_size
@staticmethod
def __count_elements(dataset_list: list):
total = 0
for element in dataset_list:
total += element['sequenceLength'][0]
return total
@staticmethod
def __split_sets(dataset: list, split_size: int):
total = 0
training_set = []
validation_set = []
for element in dataset:
if total < split_size:
training_set.append(element)
total += element['sequenceLength'][0]
else:
validation_set.append(element)
return training_set, validation_set
def __multiply_bot_set(self, bot_set):
return bot_set * self.bot_set_multiplier
def __make_dataset(self, user_set, bot_set):
dataset = user_set + bot_set
labels = self.__generate_labels(user_set, bot_set)
return self.__shuffle_dataset(dataset, labels)
@staticmethod
def __generate_labels(user_set, bot_set):
return [0 for _ in range(len(user_set))] + [1 for _ in range(len(bot_set))]
@staticmethod
def __shuffle_dataset(dataset, labels):
dataset_tuple = [(element, label) for element, label in zip(dataset, labels)]
shuffle(dataset_tuple)
dataset = [element[0] for element in dataset_tuple]
labels = [element[1] for element in dataset_tuple]
return dataset, labels
def __generate_images(self, dataset: list):
result_dataset = []
for element in dataset:
result_dataset.append(self.__prepare_image(element, self.dims[0], self.dims[1]))
return np.array(result_dataset)
def __prepare_image(self, sequence: DataFrame, x_res: int, y_res: int):
def get_x(e: dict):
if 'xCoordinate' in e:
return e['xCoordinate']
else:
return 0
def get_y(e: dict):
if 'yCoordinate' in e:
return e['yCoordinate']
else:
return 0
x_resolution = sequence['events'][0]['xResolution']
y_resolution = sequence['events'][0]['yResolution']
scaler_x = x_res / x_resolution
scaler_y = y_res / y_resolution
array = np.zeros((x_res, y_res, 3), dtype=int)
prev_x = get_x(sequence['events'][0])
prev_y = get_y(sequence['events'][0])
for element in sequence['events'][1:]:
x = get_x(element)
y = get_y(element)
points = self.__interpolate_points((prev_x, prev_y), (x, y))
x_points = [floor(p[0] * scaler_x) for p in points]
y_points = [floor(p[1] * scaler_y) for p in points]
self.__add_points_to_array(x_points, y_points, array)
prev_x = x
prev_y = y
return array
@staticmethod
def __interpolate_points(p1: (int, int), p2: (int, int)):
points = [p1]
if p1[0] == p2[0]:
for i in range(p1[1] + 1, p2[1]):
points.append((p1[0], i))
else:
a = (p2[1] - p1[1])/(p2[0] - p1[0])
b = p1[1] - a * p1[0]
for i in range(p1[0] + 1, p2[0]):
points.append((i, floor(i * a + b)))
points.append(p2)
return points
@staticmethod
def __add_points_to_array(x_points, y_points, array):
for e in zip(x_points, y_points):
array[e[0]][e[1]][0] = 255
    @staticmethod
    def print_picture(array):
        """Display *array* as an image via matplotlib, with no pixel smoothing."""
        pyplot.imshow(array, interpolation='none')
        pyplot.show()
| 5,832 | 646 | 23 |
c74386b0ac4ed3fa0a7045c9b5543b3109874a69 | 16,555 | py | Python | env/lib/python3.6/site-packages/nibabel/arrayproxy.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/nibabel/arrayproxy.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/nibabel/arrayproxy.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Array proxy base class
The proxy API is - at minimum:
* The object has a read-only attribute ``shape``
* read only ``is_proxy`` attribute / property set to True
* the object returns the data array from ``np.asarray(prox)``
* returns array slice from ``prox[<slice_spec>]`` where ``<slice_spec>`` is any
ndarray slice specification that does not use numpy 'advanced indexing'.
* modifying no object outside ``obj`` will affect the result of
``np.asarray(obj)``. Specifically:
* Changes in position (``obj.tell()``) of passed file-like objects will
not affect the output of from ``np.asarray(proxy)``.
* if you pass a header into the __init__, then modifying the original
header will not affect the result of the array return.
See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks.
"""
from contextlib import contextmanager
from threading import RLock
import numpy as np
from .deprecated import deprecate_with_version
from .volumeutils import array_from_file, apply_read_scaling
from .fileslice import fileslice
from .keywordonly import kw_only_meth
from . import openers
"""This flag controls whether a new file handle is created every time an image
is accessed through an ``ArrayProxy``, or a single file handle is created and
used for the lifetime of the ``ArrayProxy``. It should be set to one of
``True``, ``False``, or ``'auto'``.
Management of file handles will be performed either by ``ArrayProxy`` objects,
or by the ``indexed_gzip`` package if it is used.
If this flag is set to ``True``, a single file handle is created and used. If
``False``, a new file handle is created every time the image is accessed. For
gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is
present, a single file handle is created and persisted. If ``indexed_gzip`` is
not available, behaviour is the same as if ``keep_file_open is False``.
If this is set to any other value, attempts to create an ``ArrayProxy`` without
specifying the ``keep_file_open`` flag will result in a ``ValueError`` being
raised.
.. warning:: Setting this flag to a value of ``'auto'`` will become deprecated
behaviour in version 2.4.0. Support for ``'auto'`` will be removed
in version 3.0.0.
"""
KEEP_FILE_OPEN_DEFAULT = False
class ArrayProxy(object):
""" Class to act as proxy for the array that can be read from a file
The array proxy allows us to freeze the passed fileobj and header such that
it returns the expected data array.
This implementation assumes a contiguous array in the file object, with one
of the numpy dtypes, starting at a given file position ``offset`` with
single ``slope`` and ``intercept`` scaling to produce output values.
The class ``__init__`` requires a spec which defines how the data will be
read and rescaled. The spec may be a tuple of length 2 - 5, containing the
shape, storage dtype, offset, slope and intercept, or a ``header`` object
with methods:
* get_data_shape
* get_data_dtype
* get_data_offset
* get_slope_inter
A header should also have a 'copy' method. This requirement will go away
when the deprecated 'header' propoerty goes away.
This implementation allows us to deal with Analyze and its variants,
including Nifti1, and with the MGH format.
Other image types might need more specific classes to implement the API.
See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for
examples.
"""
# Assume Fortran array memory layout
order = 'F'
_header = None
@kw_only_meth(2)
def __init__(self, file_like, spec, mmap=True, keep_file_open=None):
"""Initialize array proxy instance
Parameters
----------
file_like : object
File-like object or filename. If file-like object, should implement
at least ``read`` and ``seek``.
spec : object or tuple
Tuple must have length 2-5, with the following values:
#. shape: tuple - tuple of ints describing shape of data;
#. storage_dtype: dtype specifier - dtype of array inside proxied
file, or input to ``numpy.dtype`` to specify array dtype;
#. offset: int - offset, in bytes, of data array from start of file
(default: 0);
#. slope: float - scaling factor for resulting data (default: 1.0);
#. inter: float - intercept for rescaled data (default: 0.0).
OR
Header object implementing ``get_data_shape``, ``get_data_dtype``,
``get_data_offset``, ``get_slope_inter``
mmap : {True, False, 'c', 'r'}, optional, keyword only
`mmap` controls the use of numpy memory mapping for reading data.
If False, do not try numpy ``memmap`` for data array. If one of
{'c', 'r'}, try numpy memmap with ``mode=mmap``. A `mmap` value of
True gives the same behavior as ``mmap='c'``. If `file_like`
cannot be memory-mapped, ignore `mmap` value and read array from
file.
keep_file_open : { None, 'auto', True, False }, optional, keyword only
`keep_file_open` controls whether a new file handle is created
every time the image is accessed, or a single file handle is
created and used for the lifetime of this ``ArrayProxy``. If
``True``, a single file handle is created and used. If ``False``,
a new file handle is created every time the image is accessed. If
``'auto'``, and the optional ``indexed_gzip`` dependency is
present, a single file handle is created and persisted. If
``indexed_gzip`` is not available, behaviour is the same as if
``keep_file_open is False``. If ``file_like`` is an open file
handle, this setting has no effect. The default value (``None``)
will result in the value of ``KEEP_FILE_OPEN_DEFAULT`` being used.
"""
if mmap not in (True, False, 'c', 'r'):
raise ValueError("mmap should be one of {True, False, 'c', 'r'}")
self.file_like = file_like
if hasattr(spec, 'get_data_shape'):
slope, inter = spec.get_slope_inter()
par = (spec.get_data_shape(),
spec.get_data_dtype(),
spec.get_data_offset(),
1. if slope is None else slope,
0. if inter is None else inter)
# Reference to original header; we will remove this soon
self._header = spec.copy()
elif 2 <= len(spec) <= 5:
optional = (0, 1., 0.)
par = spec + optional[len(spec) - 2:]
else:
raise TypeError('spec must be tuple of length 2-5 or header object')
# Copies of values needed to read array
self._shape, self._dtype, self._offset, self._slope, self._inter = par
# Permit any specifier that can be interpreted as a numpy dtype
self._dtype = np.dtype(self._dtype)
self._mmap = mmap
# Flags to keep track of whether a single ImageOpener is created, and
# whether a single underlying file handle is created.
self._keep_file_open, self._persist_opener = \
self._should_keep_file_open(file_like, keep_file_open)
self._lock = RLock()
def __del__(self):
"""If this ``ArrayProxy`` was created with ``keep_file_open=True``,
the open file object is closed if necessary.
"""
if hasattr(self, '_opener') and not self._opener.closed:
self._opener.close_if_mine()
self._opener = None
def __getstate__(self):
"""Returns the state of this ``ArrayProxy`` during pickling. """
state = self.__dict__.copy()
state.pop('_lock', None)
return state
def __setstate__(self, state):
"""Sets the state of this ``ArrayProxy`` during unpickling. """
self.__dict__.update(state)
self._lock = RLock()
def _should_keep_file_open(self, file_like, keep_file_open):
"""Called by ``__init__``.
This method determines how to manage ``ImageOpener`` instances,
and the underlying file handles - the behaviour depends on:
- whether ``file_like`` is an an open file handle, or a path to a
``'.gz'`` file, or a path to a non-gzip file.
- whether ``indexed_gzip`` is present (see
:attr:`.openers.HAVE_INDEXED_GZIP`).
An ``ArrayProxy`` object uses two internal flags to manage
``ImageOpener`` instances and underlying file handles.
- The ``_persist_opener`` flag controls whether a single
``ImageOpener`` should be created and used for the lifetime of
this ``ArrayProxy``, or whether separate ``ImageOpener`` instances
should be created on each file access.
- The ``_keep_file_open`` flag controls qwhether the underlying file
handle should be kept open for the lifetime of this
``ArrayProxy``, or whether the file handle should be (re-)opened
and closed on each file access.
The internal ``_keep_file_open`` flag is only relevant if
``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is
present.
This method returns the values to be used for the internal
``_persist_opener`` and ``_keep_file_open`` flags; these values are
derived according to the following rules:
1. If ``file_like`` is a file(-like) object, both flags are set to
``False``.
2. If ``keep_file_open`` (as passed to :meth:``__init__``) is
``True``, both internal flags are set to ``True``.
3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path
to a ``.gz`` file or ``indexed_gzip`` is not present, both flags
are set to ``False``.
4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a
``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener``
is set to ``True``, and ``_keep_file_open`` is set to ``False``.
In this case, file handle management is delegated to the
``indexed_gzip`` library.
5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a
``.gz`` file, and ``indexed_gzip`` is present, both internal flags
are set to ``True``.
6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a
path to a ``.gz`` file, or ``indexed_gzip`` is not present, both
internal flags are set to ``False``.
Note that a value of ``'auto'`` for ``keep_file_open`` will become
deprecated behaviour in version 2.4.0, and support for ``'auto'`` will
be removed in version 3.0.0.
Parameters
----------
file_like : object
File-like object or filename, as passed to ``__init__``.
keep_file_open : { 'auto', True, False }
Flag as passed to ``__init__``.
Returns
-------
A tuple containing:
- ``keep_file_open`` flag to control persistence of file handles
- ``persist_opener`` flag to control persistence of ``ImageOpener``
objects.
"""
if keep_file_open is None:
keep_file_open = KEEP_FILE_OPEN_DEFAULT
if keep_file_open not in ('auto', True, False):
raise ValueError('keep_file_open should be one of {None, '
'\'auto\', True, False}')
# file_like is a handle - keep_file_open is irrelevant
if hasattr(file_like, 'read') and hasattr(file_like, 'seek'):
return False, False
# if the file is a gzip file, and we have_indexed_gzip,
have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz')
if keep_file_open == 'auto':
return have_igzip, have_igzip
elif keep_file_open:
return True, True
else:
return False, have_igzip
@property
@deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0')
@property
@property
@property
@property
@property
@property
@property
@contextmanager
def _get_fileobj(self):
"""Create and return a new ``ImageOpener``, or return an existing one.
The specific behaviour depends on the value of the ``keep_file_open``
flag that was passed to ``__init__``.
Yields
------
ImageOpener
A newly created ``ImageOpener`` instance, or an existing one,
which provides access to the file.
"""
if self._persist_opener:
if not hasattr(self, '_opener'):
self._opener = openers.ImageOpener(
self.file_like, keep_open=self._keep_file_open)
yield self._opener
else:
with openers.ImageOpener(
self.file_like, keep_open=False) as opener:
yield opener
def get_unscaled(self):
""" Read of data from file
This is an optional part of the proxy API
"""
with self._get_fileobj() as fileobj, self._lock:
raw_data = array_from_file(self._shape,
self._dtype,
fileobj,
offset=self._offset,
order=self.order,
mmap=self._mmap)
return raw_data
    def reshape(self, shape):
        """ Return an ArrayProxy with a new shape, without modifying data """
        size = np.prod(self._shape)
        # Calculate new shape if not fully specified
        from operator import mul
        from functools import reduce
        n_unknowns = len([e for e in shape if e == -1])
        if n_unknowns > 1:
            raise ValueError("can only specify one unknown dimension")
        elif n_unknowns == 1:
            # reduce with initializer -1 multiplies all entries of `shape`
            # *including* its single -1, so the two -1 factors cancel and
            # `known_size` is the product of the known dimensions only.
            known_size = reduce(mul, shape, -1)
            # Floor division; a non-exact fit is caught by the prod check below.
            unknown_size = size // known_size
            shape = tuple(unknown_size if e == -1 else e for e in shape)
        if np.prod(shape) != size:
            raise ValueError("cannot reshape array of size {:d} into shape "
                             "{!s}".format(size, shape))
        # New proxy over the same file, same scaling, new shape.
        return self.__class__(file_like=self.file_like,
                              spec=(shape, self._dtype, self._offset,
                                    self._slope, self._inter),
                              mmap=self._mmap)
def is_proxy(obj):
    """ Return True if `obj` is an array proxy
    """
    # Objects with no ``is_proxy`` attribute are, by definition, not proxies.
    return getattr(obj, 'is_proxy', False)
def reshape_dataobj(obj, shape):
    """ Use `obj` reshape method if possible, else numpy reshape function
    """
    if hasattr(obj, 'reshape'):
        return obj.reshape(shape)
    return np.reshape(obj, shape)
| 40.476773 | 80 | 0.603503 | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Array proxy base class
The proxy API is - at minimum:
* The object has a read-only attribute ``shape``
* read only ``is_proxy`` attribute / property set to True
* the object returns the data array from ``np.asarray(prox)``
* returns array slice from ``prox[<slice_spec>]`` where ``<slice_spec>`` is any
ndarray slice specification that does not use numpy 'advanced indexing'.
* modifying no object outside ``obj`` will affect the result of
``np.asarray(obj)``. Specifically:
* Changes in position (``obj.tell()``) of passed file-like objects will
not affect the output of from ``np.asarray(proxy)``.
* if you pass a header into the __init__, then modifying the original
header will not affect the result of the array return.
See :mod:`nibabel.tests.test_proxy_api` for proxy API conformance checks.
"""
from contextlib import contextmanager
from threading import RLock
import numpy as np
from .deprecated import deprecate_with_version
from .volumeutils import array_from_file, apply_read_scaling
from .fileslice import fileslice
from .keywordonly import kw_only_meth
from . import openers
"""This flag controls whether a new file handle is created every time an image
is accessed through an ``ArrayProxy``, or a single file handle is created and
used for the lifetime of the ``ArrayProxy``. It should be set to one of
``True``, ``False``, or ``'auto'``.
Management of file handles will be performed either by ``ArrayProxy`` objects,
or by the ``indexed_gzip`` package if it is used.
If this flag is set to ``True``, a single file handle is created and used. If
``False``, a new file handle is created every time the image is accessed. For
gzip files, if ``'auto'``, and the optional ``indexed_gzip`` dependency is
present, a single file handle is created and persisted. If ``indexed_gzip`` is
not available, behaviour is the same as if ``keep_file_open is False``.
If this is set to any other value, attempts to create an ``ArrayProxy`` without
specifying the ``keep_file_open`` flag will result in a ``ValueError`` being
raised.
.. warning:: Setting this flag to a value of ``'auto'`` will become deprecated
behaviour in version 2.4.0. Support for ``'auto'`` will be removed
in version 3.0.0.
"""
KEEP_FILE_OPEN_DEFAULT = False
class ArrayProxy(object):
""" Class to act as proxy for the array that can be read from a file
The array proxy allows us to freeze the passed fileobj and header such that
it returns the expected data array.
This implementation assumes a contiguous array in the file object, with one
of the numpy dtypes, starting at a given file position ``offset`` with
single ``slope`` and ``intercept`` scaling to produce output values.
The class ``__init__`` requires a spec which defines how the data will be
read and rescaled. The spec may be a tuple of length 2 - 5, containing the
shape, storage dtype, offset, slope and intercept, or a ``header`` object
with methods:
* get_data_shape
* get_data_dtype
* get_data_offset
* get_slope_inter
A header should also have a 'copy' method. This requirement will go away
when the deprecated 'header' propoerty goes away.
This implementation allows us to deal with Analyze and its variants,
including Nifti1, and with the MGH format.
Other image types might need more specific classes to implement the API.
See :mod:`nibabel.minc1`, :mod:`nibabel.ecat` and :mod:`nibabel.parrec` for
examples.
"""
# Assume Fortran array memory layout
order = 'F'
_header = None
@kw_only_meth(2)
def __init__(self, file_like, spec, mmap=True, keep_file_open=None):
"""Initialize array proxy instance
Parameters
----------
file_like : object
File-like object or filename. If file-like object, should implement
at least ``read`` and ``seek``.
spec : object or tuple
Tuple must have length 2-5, with the following values:
#. shape: tuple - tuple of ints describing shape of data;
#. storage_dtype: dtype specifier - dtype of array inside proxied
file, or input to ``numpy.dtype`` to specify array dtype;
#. offset: int - offset, in bytes, of data array from start of file
(default: 0);
#. slope: float - scaling factor for resulting data (default: 1.0);
#. inter: float - intercept for rescaled data (default: 0.0).
OR
Header object implementing ``get_data_shape``, ``get_data_dtype``,
``get_data_offset``, ``get_slope_inter``
mmap : {True, False, 'c', 'r'}, optional, keyword only
`mmap` controls the use of numpy memory mapping for reading data.
If False, do not try numpy ``memmap`` for data array. If one of
{'c', 'r'}, try numpy memmap with ``mode=mmap``. A `mmap` value of
True gives the same behavior as ``mmap='c'``. If `file_like`
cannot be memory-mapped, ignore `mmap` value and read array from
file.
keep_file_open : { None, 'auto', True, False }, optional, keyword only
`keep_file_open` controls whether a new file handle is created
every time the image is accessed, or a single file handle is
created and used for the lifetime of this ``ArrayProxy``. If
``True``, a single file handle is created and used. If ``False``,
a new file handle is created every time the image is accessed. If
``'auto'``, and the optional ``indexed_gzip`` dependency is
present, a single file handle is created and persisted. If
``indexed_gzip`` is not available, behaviour is the same as if
``keep_file_open is False``. If ``file_like`` is an open file
handle, this setting has no effect. The default value (``None``)
will result in the value of ``KEEP_FILE_OPEN_DEFAULT`` being used.
"""
if mmap not in (True, False, 'c', 'r'):
raise ValueError("mmap should be one of {True, False, 'c', 'r'}")
self.file_like = file_like
if hasattr(spec, 'get_data_shape'):
slope, inter = spec.get_slope_inter()
par = (spec.get_data_shape(),
spec.get_data_dtype(),
spec.get_data_offset(),
1. if slope is None else slope,
0. if inter is None else inter)
# Reference to original header; we will remove this soon
self._header = spec.copy()
elif 2 <= len(spec) <= 5:
optional = (0, 1., 0.)
par = spec + optional[len(spec) - 2:]
else:
raise TypeError('spec must be tuple of length 2-5 or header object')
# Copies of values needed to read array
self._shape, self._dtype, self._offset, self._slope, self._inter = par
# Permit any specifier that can be interpreted as a numpy dtype
self._dtype = np.dtype(self._dtype)
self._mmap = mmap
# Flags to keep track of whether a single ImageOpener is created, and
# whether a single underlying file handle is created.
self._keep_file_open, self._persist_opener = \
self._should_keep_file_open(file_like, keep_file_open)
self._lock = RLock()
def __del__(self):
"""If this ``ArrayProxy`` was created with ``keep_file_open=True``,
the open file object is closed if necessary.
"""
if hasattr(self, '_opener') and not self._opener.closed:
self._opener.close_if_mine()
self._opener = None
def __getstate__(self):
"""Returns the state of this ``ArrayProxy`` during pickling. """
state = self.__dict__.copy()
state.pop('_lock', None)
return state
def __setstate__(self, state):
"""Sets the state of this ``ArrayProxy`` during unpickling. """
self.__dict__.update(state)
self._lock = RLock()
def _should_keep_file_open(self, file_like, keep_file_open):
"""Called by ``__init__``.
This method determines how to manage ``ImageOpener`` instances,
and the underlying file handles - the behaviour depends on:
- whether ``file_like`` is an an open file handle, or a path to a
``'.gz'`` file, or a path to a non-gzip file.
- whether ``indexed_gzip`` is present (see
:attr:`.openers.HAVE_INDEXED_GZIP`).
An ``ArrayProxy`` object uses two internal flags to manage
``ImageOpener`` instances and underlying file handles.
- The ``_persist_opener`` flag controls whether a single
``ImageOpener`` should be created and used for the lifetime of
this ``ArrayProxy``, or whether separate ``ImageOpener`` instances
should be created on each file access.
- The ``_keep_file_open`` flag controls qwhether the underlying file
handle should be kept open for the lifetime of this
``ArrayProxy``, or whether the file handle should be (re-)opened
and closed on each file access.
The internal ``_keep_file_open`` flag is only relevant if
``file_like`` is a ``'.gz'`` file, and the ``indexed_gzip`` library is
present.
This method returns the values to be used for the internal
``_persist_opener`` and ``_keep_file_open`` flags; these values are
derived according to the following rules:
1. If ``file_like`` is a file(-like) object, both flags are set to
``False``.
2. If ``keep_file_open`` (as passed to :meth:``__init__``) is
``True``, both internal flags are set to ``True``.
3. If ``keep_file_open`` is ``False``, but ``file_like`` is not a path
to a ``.gz`` file or ``indexed_gzip`` is not present, both flags
are set to ``False``.
4. If ``keep_file_open`` is ``False``, ``file_like`` is a path to a
``.gz`` file, and ``indexed_gzip`` is present, ``_persist_opener``
is set to ``True``, and ``_keep_file_open`` is set to ``False``.
In this case, file handle management is delegated to the
``indexed_gzip`` library.
5. If ``keep_file_open`` is ``'auto'``, ``file_like`` is a path to a
``.gz`` file, and ``indexed_gzip`` is present, both internal flags
are set to ``True``.
6. If ``keep_file_open`` is ``'auto'``, and ``file_like`` is not a
path to a ``.gz`` file, or ``indexed_gzip`` is not present, both
internal flags are set to ``False``.
Note that a value of ``'auto'`` for ``keep_file_open`` will become
deprecated behaviour in version 2.4.0, and support for ``'auto'`` will
be removed in version 3.0.0.
Parameters
----------
file_like : object
File-like object or filename, as passed to ``__init__``.
keep_file_open : { 'auto', True, False }
Flag as passed to ``__init__``.
Returns
-------
A tuple containing:
- ``keep_file_open`` flag to control persistence of file handles
- ``persist_opener`` flag to control persistence of ``ImageOpener``
objects.
"""
if keep_file_open is None:
keep_file_open = KEEP_FILE_OPEN_DEFAULT
if keep_file_open not in ('auto', True, False):
raise ValueError('keep_file_open should be one of {None, '
'\'auto\', True, False}')
# file_like is a handle - keep_file_open is irrelevant
if hasattr(file_like, 'read') and hasattr(file_like, 'seek'):
return False, False
# if the file is a gzip file, and we have_indexed_gzip,
have_igzip = openers.HAVE_INDEXED_GZIP and file_like.endswith('.gz')
if keep_file_open == 'auto':
return have_igzip, have_igzip
elif keep_file_open:
return True, True
else:
return False, have_igzip
    @property
    @deprecate_with_version('ArrayProxy.header deprecated', '2.2', '3.0')
    def header(self):
        """Deprecated: the header object this proxy was created from."""
        return self._header
    @property
    def shape(self):
        """Shape of the proxied array (read-only)."""
        return self._shape
    @property
    def ndim(self):
        """Number of dimensions in the proxied array."""
        return len(self.shape)
    @property
    def dtype(self):
        """numpy dtype of the array as stored in the file."""
        return self._dtype
    @property
    def offset(self):
        """Offset, in bytes, of the data array within the file."""
        return self._offset
    @property
    def slope(self):
        """Scaling slope applied to the raw on-disk values."""
        return self._slope
    @property
    def inter(self):
        """Scaling intercept applied to the raw on-disk values."""
        return self._inter
    @property
    def is_proxy(self):
        """Always True; marks this object as an array proxy."""
        return True
@contextmanager
def _get_fileobj(self):
"""Create and return a new ``ImageOpener``, or return an existing one.
The specific behaviour depends on the value of the ``keep_file_open``
flag that was passed to ``__init__``.
Yields
------
ImageOpener
A newly created ``ImageOpener`` instance, or an existing one,
which provides access to the file.
"""
if self._persist_opener:
if not hasattr(self, '_opener'):
self._opener = openers.ImageOpener(
self.file_like, keep_open=self._keep_file_open)
yield self._opener
else:
with openers.ImageOpener(
self.file_like, keep_open=False) as opener:
yield opener
def get_unscaled(self):
""" Read of data from file
This is an optional part of the proxy API
"""
with self._get_fileobj() as fileobj, self._lock:
raw_data = array_from_file(self._shape,
self._dtype,
fileobj,
offset=self._offset,
order=self.order,
mmap=self._mmap)
return raw_data
def __array__(self):
# Read array and scale
raw_data = self.get_unscaled()
return apply_read_scaling(raw_data, self._slope, self._inter)
def __getitem__(self, slicer):
with self._get_fileobj() as fileobj:
raw_data = fileslice(fileobj,
slicer,
self._shape,
self._dtype,
self._offset,
order=self.order,
lock=self._lock)
# Upcast as necessary for big slopes, intercepts
return apply_read_scaling(raw_data, self._slope, self._inter)
def reshape(self, shape):
""" Return an ArrayProxy with a new shape, without modifying data """
size = np.prod(self._shape)
# Calculate new shape if not fully specified
from operator import mul
from functools import reduce
n_unknowns = len([e for e in shape if e == -1])
if n_unknowns > 1:
raise ValueError("can only specify one unknown dimension")
elif n_unknowns == 1:
known_size = reduce(mul, shape, -1)
unknown_size = size // known_size
shape = tuple(unknown_size if e == -1 else e for e in shape)
if np.prod(shape) != size:
raise ValueError("cannot reshape array of size {:d} into shape "
"{!s}".format(size, shape))
return self.__class__(file_like=self.file_like,
spec=(shape, self._dtype, self._offset,
self._slope, self._inter),
mmap=self._mmap)
def is_proxy(obj):
    """ Return True if `obj` is an array proxy
    """
    # An object without the ``is_proxy`` marker attribute is not a proxy.
    return getattr(obj, 'is_proxy', False)
def reshape_dataobj(obj, shape):
    """ Use `obj` reshape method if possible, else numpy reshape function
    """
    reshaper = getattr(obj, 'reshape', None)
    if reshaper is not None:
        return reshaper(shape)
    return np.reshape(obj, shape)
| 822 | 0 | 262 |
65a41a39ee19c5d93b77cb23d9f908cd2d716751 | 1,968 | py | Python | code/dataset/hotels.py | aarashfeizi/Proxy-Anchor-CVPR2020 | a7b9ed46d9d44841bd6bce78f4fddb95107a022b | [
"MIT"
] | null | null | null | code/dataset/hotels.py | aarashfeizi/Proxy-Anchor-CVPR2020 | a7b9ed46d9d44841bd6bce78f4fddb95107a022b | [
"MIT"
] | null | null | null | code/dataset/hotels.py | aarashfeizi/Proxy-Anchor-CVPR2020 | a7b9ed46d9d44841bd6bce78f4fddb95107a022b | [
"MIT"
] | null | null | null | import pandas as pd
from .base import *
from tqdm import tqdm | 42.782609 | 125 | 0.547256 | import pandas as pd
from .base import *
from tqdm import tqdm
class Hotels(BaseDataset):
    """Hotels-50k image dataset using the v5 "small" splits.

    Loads the split CSV for the requested mode and exposes labels (``ys``),
    sample indices (``I``) and absolute image paths (``im_paths``) in the
    layout expected by ``BaseDataset``.
    """

    def __init__(self, root, mode, transform=None, project_dir=None):
        """
        Parameters
        ----------
        root : str
            Dataset root; images are expected under ``<root>/hotels50k/``.
        mode : {'train', 'eval'}
            Selects ``train_small.csv`` or ``val1_small.csv`` respectively.
        transform : callable, optional
            Image transform, passed through to ``BaseDataset``.
        project_dir : str, optional
            Directory containing the ``v5_splits`` folder.
        """
        # Previously an unrecognised mode silently left ``config_file``
        # unset and crashed later with an AttributeError; fail fast instead.
        if mode not in ('train', 'eval'):
            raise ValueError("mode must be 'train' or 'eval', got %r" % (mode,))
        self.mode = mode
        self.root = root + '/hotels50k/'
        # NOTE(review): assumes `pickle`, `np`, `os` and `pd` come into scope
        # via `from .base import *` / the module imports - confirm in base.py.
        with open(project_dir + '/v5_splits/train_lbl2id.pkl', 'rb') as f:
            self.train_lbl2id = pickle.load(f)
        if mode == 'train':
            self.config_file = pd.read_csv(project_dir + '/v5_splits/train_small.csv')
        else:
            self.config_file = pd.read_csv(project_dir + '/v5_splits/val1_small.csv')
        self.transform = transform
        print('getting classes')
        self.classes = np.unique(self.config_file.label)
        BaseDataset.__init__(self, self.root, self.mode, self.transform)
        self.ys = list(self.config_file.label)
        self.I = [i for i in range(len(self.ys))]
        # Image paths in the CSV are relative to the dataset root.
        relative_im_paths = list(self.config_file.image)
        self.im_paths = [os.path.join(self.root, i) for i in relative_im_paths]
54d7a84e8ba35c7b79d953084e0b929e2f0a3e08 | 3,147 | py | Python | plugins/redshift/redshift/lambda_sources/redshift_db_creator/redshift_functions.py | NickCorbett/aws-orbit-workbench | b598fc012d051d5390c6c0aff12a3cce254d36b5 | [
"Apache-2.0"
] | null | null | null | plugins/redshift/redshift/lambda_sources/redshift_db_creator/redshift_functions.py | NickCorbett/aws-orbit-workbench | b598fc012d051d5390c6c0aff12a3cce254d36b5 | [
"Apache-2.0"
] | null | null | null | plugins/redshift/redshift/lambda_sources/redshift_db_creator/redshift_functions.py | NickCorbett/aws-orbit-workbench | b598fc012d051d5390c6c0aff12a3cce254d36b5 | [
"Apache-2.0"
] | null | null | null | import json
import logging
import os
from typing import Any, Dict, Optional
import boto3
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
| 41.96 | 98 | 0.627582 | import json
import logging
import os
from typing import Any, Dict, Optional
import boto3
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
def lambda_handler(event: Dict[str, Any], context: Optional[Dict[str, Any]]) -> Dict[str, str]:
    """Create (or resume) the Orbit team Redshift cluster named in *event*.

    Behaviour:
      * cluster does not exist  -> create it, return 200 with its id;
      * cluster exists, paused  -> resume it, return 200 with modify status;
      * cluster exists, other   -> return 409.

    Master credentials come from the Secrets Manager secret named by the
    ``SecretId`` environment variable; sizing parameters fall back to
    environment variables when not present in *event*.
    """
    redshift = boto3.client("redshift")
    secretsmanager = boto3.client("secretsmanager")
    response = secretsmanager.get_secret_value(SecretId=os.environ["SecretId"])
    pass_json = json.loads(response["SecretString"])
    password = pass_json["password"]
    user = pass_json["username"]
    cluster_identifier = event["cluster_name"]

    # Bug fixes: the original called the non-existent list method
    # ``isEmpty()`` (AttributeError whenever the cluster existed), read
    # ``ClusterStatus`` from the wrong level of the describe response, and
    # treated *any* exception (credentials, throttling, ...) as "cluster not
    # found". Only the not-found fault now routes to creation.
    try:
        describe = redshift.describe_clusters(ClusterIdentifier=cluster_identifier)
        clusters = describe.get("Clusters", [])
    except redshift.exceptions.ClusterNotFoundFault:
        LOGGER.debug("Cluster not found,good, now lets start it")
        clusters = []

    if clusters:
        cluster = clusters[0]
        if cluster["ClusterStatus"] == "paused":
            # Cluster Found with Paused State, resume the cluster.
            resume_response = redshift.resume_cluster(ClusterIdentifier=cluster_identifier)
            if not resume_response:
                return {
                    "statusCode": "409",
                }
            return {
                "statusCode": "200",
                "cluster_id": resume_response["Cluster"]["ClusterIdentifier"],
                "username": user,
                "modify_status": resume_response["Cluster"]["ModifyStatus"],
            }
        LOGGER.error("Cluster already exists")
        return {
            "statusCode": "409",
        }

    database = event["Database"] if "Database" in event else os.environ["Database"]
    response = redshift.create_cluster(
        DBName=database,
        ClusterIdentifier=cluster_identifier,
        ClusterType=event["ClusterType"] if "ClusterType" in event else os.environ["ClusterType"],
        NodeType=event["NodeType"] if "NodeType" in event else os.environ["NodeType"],
        Encrypted=True,
        KmsKeyId=os.environ["kms_key"],
        AutomatedSnapshotRetentionPeriod=0,  # Snaphot disabled
        MasterUsername=user,
        MasterUserPassword=password,
        VpcSecurityGroupIds=[os.environ["RedshiftClusterSecurityGroup"]],
        ClusterSubnetGroupName=os.environ["RedshiftClusterSubnetGroup"],
        ClusterParameterGroupName=os.environ["RedshiftClusterParameterGroup"],
        Port=int(event["PortNumber"] if "PortNumber" in event else os.environ["PortNumber"]),
        NumberOfNodes=int(event["Nodes"] if "Nodes" in event else os.environ["Nodes"]),
        PubliclyAccessible=False,
        IamRoles=[os.environ["Role"]],
        Tags=[
            {"Key": "Product", "Value": "Orbit"},
            {"Key": "SubProduct", "Value": "Redshift"},
            {"Key": "Env", "Value": os.environ["Env"]},
            {"Key": "TeamSpace", "Value": os.environ["TeamSpace"]},
            {"Key": "MasterPasswordSecretID", "Value": os.environ["SecretId"]},
        ],
    )
    cluster_id = response["Cluster"]["ClusterIdentifier"]
    print("cluster created: ", cluster_id)
    return {"statusCode": "200", "cluster_id": cluster_id, "username": user}
| 2,973 | 0 | 23 |
e51138a705d78f5df1a24f0e485b77b03328856a | 1,654 | py | Python | badbaby/mmn/write_dependent_measures_data.py | pettetmw/badbaby | ed4710cc13bbbf9da3409f6ed74319ac9d4faa61 | [
"MIT"
] | null | null | null | badbaby/mmn/write_dependent_measures_data.py | pettetmw/badbaby | ed4710cc13bbbf9da3409f6ed74319ac9d4faa61 | [
"MIT"
] | null | null | null | badbaby/mmn/write_dependent_measures_data.py | pettetmw/badbaby | ed4710cc13bbbf9da3409f6ed74319ac9d4faa61 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Script writes out numpy archive of nd AUC and peak latency array data"""
# Authors: Kambiz Tavabi <ktavabi@gmail.com>
# License: MIT
from os import path as op
import time
import numpy as np
from mne import read_evokeds
from badbaby import parameters as params
from badbaby import return_dataframes as rd
# Some parameters
data_dir = params.meg_dirs['mmn']
df, cdi_df = rd.return_simms_mmn_dfs()
subjects = df.Subject_ID.values.tolist()
agency = 'SIMMS'
analysis = 'Individual'
conditions = ['standard', 'Ba', 'Wa']
lpf = 30
erf_data = np.load(op.join(data_dir,
'%s_Analysis-%s_%d-ERF-data.npz'
% (agency, analysis, lpf)))
file_out = op.join(data_dir,
'%s_Analysis-%s_%d-DepMeas-data.npz'
% (agency, analysis, lpf))
# Loop over subjects and write ND data matrix
t0 = time.time()
for ci, cond in enumerate(conditions):
print(' %s...' % cond)
for si, subj in enumerate(subjects):
print(' %s' % subj)
evoked_file = op.join(data_dir, 'bad_%s' % subj, 'inverse',
'%s_%d-sss_eq_bad_%s-ave.fif'
% (analysis, lpf, subj))
evoked = read_evokeds(evoked_file, condition=cond,
baseline=(None, 0))
if len(evoked.info['bads']) > 0:
print(' Interpolating bad channels...')
evoked.interpolate_bads()
times = evoked.times
sfreq = evoked.info['sfreq']
ch_names = evoked.info['ch_names']
assert(all(np.asarray(ch_names) == np.asarray(params.vv_ch_order))) | 35.191489 | 75 | 0.596131 | # -*- coding: utf-8 -*-
"""Script writes out numpy archive of nd AUC and peak latency array data"""
# Authors: Kambiz Tavabi <ktavabi@gmail.com>
# License: MIT
from os import path as op
import time
import numpy as np
from mne import read_evokeds
from badbaby import parameters as params
from badbaby import return_dataframes as rd
# Some parameters
data_dir = params.meg_dirs['mmn']
df, cdi_df = rd.return_simms_mmn_dfs()
subjects = df.Subject_ID.values.tolist()
agency = 'SIMMS'
analysis = 'Individual'
conditions = ['standard', 'Ba', 'Wa']
lpf = 30
erf_data = np.load(op.join(data_dir,
'%s_Analysis-%s_%d-ERF-data.npz'
% (agency, analysis, lpf)))
file_out = op.join(data_dir,
'%s_Analysis-%s_%d-DepMeas-data.npz'
% (agency, analysis, lpf))
# Loop over subjects and write ND data matrix
t0 = time.time()
for ci, cond in enumerate(conditions):
print(' %s...' % cond)
for si, subj in enumerate(subjects):
print(' %s' % subj)
evoked_file = op.join(data_dir, 'bad_%s' % subj, 'inverse',
'%s_%d-sss_eq_bad_%s-ave.fif'
% (analysis, lpf, subj))
evoked = read_evokeds(evoked_file, condition=cond,
baseline=(None, 0))
if len(evoked.info['bads']) > 0:
print(' Interpolating bad channels...')
evoked.interpolate_bads()
times = evoked.times
sfreq = evoked.info['sfreq']
ch_names = evoked.info['ch_names']
assert(all(np.asarray(ch_names) == np.asarray(params.vv_ch_order))) | 0 | 0 | 0 |
cb6ead90047b80c452cd46584ed8d9a83594b0b3 | 3,704 | py | Python | MachineLearning/Random Forest/tree.py | chenxygx/PythonSimple | bd4c86d8bc8064fa62f7a2ec3040d1e1c90252c3 | [
"Apache-2.0"
] | 1 | 2021-07-22T02:31:20.000Z | 2021-07-22T02:31:20.000Z | MachineLearning/Random Forest/tree.py | chenxygx/PythonSimple | bd4c86d8bc8064fa62f7a2ec3040d1e1c90252c3 | [
"Apache-2.0"
] | null | null | null | MachineLearning/Random Forest/tree.py | chenxygx/PythonSimple | bd4c86d8bc8064fa62f7a2ec3040d1e1c90252c3 | [
"Apache-2.0"
] | 1 | 2021-07-22T02:29:49.000Z | 2021-07-22T02:29:49.000Z | # coding:UTF-8
'''
Date:20161030
@author: zhaozhiyong
'''
from math import pow
class node:
    '''Node of a CART decision tree.

    A leaf stores ``results`` (a label -> count mapping); an internal node
    stores the split feature index ``fea``, the split threshold ``value``
    and the ``left``/``right`` subtrees.
    '''
    # NOTE: build_tree() below constructs nodes via keyword arguments
    # (node(fea=..., value=..., right=..., left=...) and node(results=...)),
    # so this constructor is required for the module to work at all.
    def __init__(self, fea=-1, value=None, results=None, right=None, left=None):
        self.fea = fea          # column index of the feature used for the split
        self.value = value      # threshold value of the split
        self.results = results  # class counts stored at a leaf (None for internal nodes)
        self.right = right      # subtree for samples with x[fea] >= value
        self.left = left        # subtree for samples with x[fea] < value
def split_tree(data, fea, value):
    '''Partition data on feature fea at threshold value.

    input:  data(list): dataset, one sample per row
            fea(int): index of the feature to split on
            value(float): threshold used for the split
    output: (set1, set2)(tuple): rows with row[fea] >= value, then the rest
    '''
    greater_eq = [row for row in data if row[fea] >= value]
    smaller = [row for row in data if row[fea] < value]
    return (greater_eq, smaller)
def label_uniq_cnt(data):
    '''Count how often each class label occurs in the dataset.

    input:  data(list): dataset; the label is the last element of each row
    output: counts(dict): mapping label -> number of occurrences
    '''
    counts = {}
    for row in data:
        tag = row[-1]  # class label sits in the last column
        counts[tag] = counts.get(tag, 0) + 1
    return counts
def cal_gini_index(data):
    '''Compute the Gini impurity of the given dataset.

    input:  data(list): samples reaching this tree node
    output: gini(float): Gini impurity; 0 for an empty dataset
    '''
    total = len(data)  # total number of samples
    if total == 0:
        return 0
    # Tally per-label counts, then apply gini = 1 - sum(p_i^2).
    squared = 0
    for cnt in label_uniq_cnt(data).values():
        squared += pow(cnt, 2)
    return 1 - float(squared) / pow(total, 2)
def build_tree(data):
    '''Recursively build a CART classification tree.

    input:  data(list): training samples, class label in the last column
    output: node: root node of the (sub)tree
    '''
    # An empty dataset yields an empty node.
    if len(data) == 0:
        return node()
    # 1. Gini impurity of the current node.
    currentGini = cal_gini_index(data)
    bestGain = 0.0
    bestCriteria = None  # best split: (feature index, threshold)
    bestSets = None  # the two datasets produced by the best split
    feature_num = len(data[0]) - 1  # number of features per sample
    # 2. Search every (feature, threshold) pair for the best split.
    for fea in range(0, feature_num):
        # 2.1 Collect the distinct values of feature fea.
        feature_values = {}  # set of values seen at position fea
        for sample in data:  # for every sample
            feature_values[sample[fea]] = 1  # record each value seen at fea
        # 2.2 Try every candidate threshold and evaluate the Gini index.
        for value in feature_values.keys():  # iterate candidate split points
            # 2.2.1 Split the dataset on feature fea at this value.
            (set_1, set_2) = split_tree(data, fea, value)
            # 2.2.2 Weighted Gini impurity of the two branches.
            nowGini = float(len(set_1) * cal_gini_index(set_1) + \
                            len(set_2) * cal_gini_index(set_2)) / len(data)
            # 2.2.3 Impurity reduction achieved by this split.
            gain = currentGini - nowGini
            # 2.2.4 Keep the split only if it strictly improves the best so
            # far and both branches are non-empty (strict '>' keeps the
            # first-found split on ties).
            if gain > bestGain and len(set_1) > 0 and len(set_2) > 0:
                bestGain = gain
                bestCriteria = (fea, value)
                bestSets = (set_1, set_2)
    # 3. Recurse if a useful split was found, otherwise emit a leaf.
    if bestGain > 0:
        right = build_tree(bestSets[0])
        left = build_tree(bestSets[1])
        return node(fea=bestCriteria[0], value=bestCriteria[1], \
                    right=right, left=left)
    else:
        return node(results=label_uniq_cnt(data))  # leaf: store label counts
def predict(sample, tree):
    '''Classify a single sample by walking the decision tree.

    input:  sample(list): feature vector to classify
            tree: root node of a trained tree
    output: tree.results: label-count dict stored at the reached leaf
    '''
    current = tree
    # Descend until a leaf (a node carrying results) is reached.
    while current.results is None:
        if sample[current.fea] >= current.value:
            current = current.right
        else:
            current = current.left
    return current.results
return predict(sample, branch)
| 28.060606 | 80 | 0.568575 | # coding:UTF-8
'''
Date:20161030
@author: zhaozhiyong
'''
from math import pow
class node:
    '''A node of the CART decision tree.
    '''
    def __init__(self, fea=-1, value=None, results=None, right=None, left=None):
        self.fea = fea  # column index of the feature used to split the data
        self.value = value  # threshold value of the split
        self.results = results  # class counts stored at a leaf node (None otherwise)
        self.right = right  # right subtree (samples with x[fea] >= value)
        self.left = left  # left subtree (samples with x[fea] < value)
def split_tree(data, fea, value):
    '''Split the dataset on feature fea at threshold value.

    input:  data(list): the dataset
            fea(int): index of the splitting feature
            value(float): split threshold
    output: (set1, set2)(tuple): rows with row[fea] >= value, then the rest
    '''
    upper, lower = [], []
    for row in data:
        # Route each row to the matching side of the split in one pass.
        (upper if row[fea] >= value else lower).append(row)
    return (upper, lower)
def label_uniq_cnt(data):
    '''Tally the occurrences of each class label in the dataset.

    input:  data(list): dataset, label in the last column
    output: tally(dict): label -> occurrence count
    '''
    tally = {}
    for sample in data:
        tag = sample[len(sample) - 1]  # label is the last field of the row
        tally[tag] = tally.get(tag, 0) + 1
    return tally
def cal_gini_index(data):
    '''Compute the Gini impurity of a dataset.

    input:  data(list): samples at the current node
    output: gini(float): Gini impurity; 0 for an empty dataset
    '''
    total = len(data)
    if total == 0:
        return 0
    # gini = 1 - sum((count_i / total)^2) over all labels.
    squared_sum = sum(pow(c, 2) for c in label_uniq_cnt(data).values())
    return 1 - float(squared_sum) / pow(total, 2)
def build_tree(data):
    '''Recursively build a CART classification tree.

    input:  data(list): training samples, class label in the last column
    output: node: root node of the (sub)tree
    '''
    # An empty dataset yields an empty node.
    if len(data) == 0:
        return node()
    # 1. Gini impurity of the current node.
    currentGini = cal_gini_index(data)
    bestGain = 0.0
    bestCriteria = None  # best split: (feature index, threshold)
    bestSets = None  # the two datasets produced by the best split
    feature_num = len(data[0]) - 1  # number of features per sample
    # 2. Search every (feature, threshold) pair for the best split.
    for fea in range(0, feature_num):
        # 2.1 Collect the distinct values of feature fea.
        feature_values = {}  # set of values seen at position fea
        for sample in data:  # for every sample
            feature_values[sample[fea]] = 1  # record each value seen at fea
        # 2.2 Try every candidate threshold and evaluate the Gini index.
        for value in feature_values.keys():  # iterate candidate split points
            # 2.2.1 Split the dataset on feature fea at this value.
            (set_1, set_2) = split_tree(data, fea, value)
            # 2.2.2 Weighted Gini impurity of the two branches.
            nowGini = float(len(set_1) * cal_gini_index(set_1) + \
                            len(set_2) * cal_gini_index(set_2)) / len(data)
            # 2.2.3 Impurity reduction achieved by this split.
            gain = currentGini - nowGini
            # 2.2.4 Keep the split only if it strictly improves the best so
            # far and both branches are non-empty (strict '>' keeps the
            # first-found split on ties).
            if gain > bestGain and len(set_1) > 0 and len(set_2) > 0:
                bestGain = gain
                bestCriteria = (fea, value)
                bestSets = (set_1, set_2)
    # 3. Recurse if a useful split was found, otherwise emit a leaf.
    if bestGain > 0:
        right = build_tree(bestSets[0])
        left = build_tree(bestSets[1])
        return node(fea=bestCriteria[0], value=bestCriteria[1], \
                    right=right, left=left)
    else:
        return node(results=label_uniq_cnt(data))  # leaf: store label counts
def predict(sample, tree):
    '''Classify sample by recursively descending the decision tree.

    input:  sample(list): feature vector to classify
            tree: (sub)tree root
    output: tree.results: label counts of the leaf that sample falls into
    '''
    # Leaf reached: return the stored class counts.
    if tree.results is not None:
        return tree.results
    # Internal node: follow the branch selected by the split test.
    branch = tree.right if sample[tree.fea] >= tree.value else tree.left
    return predict(sample, branch)
| 319 | 0 | 26 |
18ce7bac55c6a1612d4ea60738aace84b2d4aa3a | 3,129 | py | Python | Language/python/Program3/main.py | Aryan-Bhatt-pro/hacktoberfest-2021 | 5091812a399761128791b3d4ae7934b13027ae02 | [
"MIT"
] | 4 | 2021-10-15T06:59:52.000Z | 2022-01-07T17:07:02.000Z | Language/python/Program3/main.py | Aryan-Bhatt-pro/hacktoberfest-2021 | 5091812a399761128791b3d4ae7934b13027ae02 | [
"MIT"
] | 35 | 2021-10-13T11:55:13.000Z | 2022-01-22T06:32:12.000Z | Language/python/Program3/main.py | Aryan-Bhatt-pro/hacktoberfest-2021 | 5091812a399761128791b3d4ae7934b13027ae02 | [
"MIT"
] | 16 | 2021-10-13T11:42:58.000Z | 2022-01-07T17:06:29.000Z | # Calculator using Python Tkinter
from tkinter import Tk, Entry, Button, StringVar
base = Tk()
calculator__pad = Calculator(base)
base.mainloop()
| 48.890625 | 121 | 0.583893 | # Calculator using Python Tkinter
from tkinter import Tk, Entry, Button, StringVar
class Calculator:
    """Simple calculator window built with Tkinter.

    Lays out an entry display plus a grid of buttons at fixed pixel
    positions inside the toplevel window supplied by the caller.
    """

    def __init__(self, body):
        body.title("Calculator")
        body.geometry("357x420+0+0")
        body.config(bg='black')
        self.eqn = StringVar()
        self.entry_val = ''
        Entry(width=19, bg='#FFFFFF', font=('Arial Bold', 32),
              textvariable=self.eqn).place(x=1, y=0)
        # (value appended to the expression, x position, y position)
        layout = [
            ('(', 0, 50), (')', 90, 50), ('%', 180, 50),
            (1, 0, 125), (2, 90, 125), (3, 180, 125),
            (4, 0, 200), (5, 90, 200), (6, 180, 200),
            (7, 0, 275), (8, 180, 275), (9, 90, 275),
            (0, 90, 350), ('.', 180, 350),
            ('+', 270, 275), ('-', 270, 200), ('/', 270, 50), ('x', 270, 125),
        ]
        for val, x, y in layout:
            # Bind val as a default argument to avoid the late-binding
            # closure pitfall inside the loop.
            Button(width=10, height=4, text=str(val), relief='flat',
                   command=lambda v=val: self.display__val(v)).place(x=x, y=y)
        # '=' and 'C' have no command attached (not implemented yet).
        Button(width=10, height=4, text='=', relief='flat').place(x=270, y=350)
        Button(width=10, height=4, text='C', relief='flat').place(x=0, y=350)

    def display__val(self, val):
        """Append val to the running expression and refresh the display."""
        self.entry_val += str(val)
        self.eqn.set(self.entry_val)
# Build the main window, attach the calculator UI, and start the event loop.
base = Tk()
calculator__pad = Calculator(base)
base.mainloop()
| 2,899 | -4 | 76 |
1566cf8b55df6d4c8242e44bd253a8faacee39fc | 2,182 | py | Python | src/calmjs/parse/tests/test_es5_parser.py | KristobalJunta/calmjs.parse | 0ee6a497404a38670ada1ef029a20f8e6f4499e4 | [
"MIT"
] | null | null | null | src/calmjs/parse/tests/test_es5_parser.py | KristobalJunta/calmjs.parse | 0ee6a497404a38670ada1ef029a20f8e6f4499e4 | [
"MIT"
] | null | null | null | src/calmjs/parse/tests/test_es5_parser.py | KristobalJunta/calmjs.parse | 0ee6a497404a38670ada1ef029a20f8e6f4499e4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import textwrap
import unittest
from io import StringIO
from calmjs.parse import asttypes
from calmjs.parse.parsers.es5 import Parser
from calmjs.parse.parsers.es5 import parse
from calmjs.parse.parsers.es5 import read
from calmjs.parse.unparsers.es5 import pretty_print
from calmjs.parse.walkers import walk
from calmjs.parse.tests.parser import (
ParserCaseMixin,
build_node_repr_test_cases,
build_asi_test_cases,
build_syntax_error_test_cases,
build_regex_syntax_error_test_cases,
build_comments_test_cases,
)
ParsedNodeTypeTestCase = build_node_repr_test_cases(
'ParsedNodeTypeTestCase', parse, 'ES5Program')
# ASI - Automatic Semicolon Insertion
ParserToECMAASITestCase = build_asi_test_cases(
'ParserToECMAASITestCase', parse, pretty_print)
ECMASyntaxErrorsTestCase = build_syntax_error_test_cases(
'ECMASyntaxErrorsTestCase', parse)
ECMARegexSyntaxErrorsTestCase = build_regex_syntax_error_test_cases(
'ECMARegexSyntaxErrorsTestCase', parse)
ParsedNodeTypesWithCommentsTestCase = build_comments_test_cases(
'ParsedNodeTypeWithCommentsTestCase', parse, 'ES5Program')
| 29.093333 | 75 | 0.68286 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import textwrap
import unittest
from io import StringIO
from calmjs.parse import asttypes
from calmjs.parse.parsers.es5 import Parser
from calmjs.parse.parsers.es5 import parse
from calmjs.parse.parsers.es5 import read
from calmjs.parse.unparsers.es5 import pretty_print
from calmjs.parse.walkers import walk
from calmjs.parse.tests.parser import (
ParserCaseMixin,
build_node_repr_test_cases,
build_asi_test_cases,
build_syntax_error_test_cases,
build_regex_syntax_error_test_cases,
build_comments_test_cases,
)
class ParserTestCase(unittest.TestCase, ParserCaseMixin):
parse = staticmethod(parse)
def test_modify_tree(self):
text = """
for (var i = 0; i < 10; i++) {
var x = 5 + i;
}
"""
parser = Parser()
tree = parser.parse(text)
for node in walk(tree):
if isinstance(node, asttypes.Identifier) and node.value == 'i':
node.value = 'hello'
self.assertMultiLineEqual(
str(tree),
textwrap.dedent("""
for (var hello = 0; hello < 10; hello++) {
var x = 5 + hello;
}
""").lstrip()
)
def test_read(self):
stream = StringIO('var foo = "bar";')
node = read(stream)
self.assertTrue(isinstance(node, asttypes.ES5Program))
self.assertIsNone(node.sourcepath)
stream.name = 'somefile.js'
node = read(stream)
self.assertEqual(node.sourcepath, 'somefile.js')
ParsedNodeTypeTestCase = build_node_repr_test_cases(
'ParsedNodeTypeTestCase', parse, 'ES5Program')
# ASI - Automatic Semicolon Insertion
ParserToECMAASITestCase = build_asi_test_cases(
'ParserToECMAASITestCase', parse, pretty_print)
ECMASyntaxErrorsTestCase = build_syntax_error_test_cases(
'ECMASyntaxErrorsTestCase', parse)
ECMARegexSyntaxErrorsTestCase = build_regex_syntax_error_test_cases(
'ECMARegexSyntaxErrorsTestCase', parse)
ParsedNodeTypesWithCommentsTestCase = build_comments_test_cases(
'ParsedNodeTypeWithCommentsTestCase', parse, 'ES5Program')
| 845 | 123 | 23 |
b4e8a57a756a57e9ed1650b78399ad2a58f58a04 | 290 | py | Python | Modulo_3/semana 2/areas/paquete.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_3/semana 2/areas/paquete.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | null | null | null | Modulo_3/semana 2/areas/paquete.py | AutodidactaMx/cocid_python | 11628f465ff362807a692c79ede26bf30dd8e26a | [
"MIT"
] | 1 | 2022-03-04T00:57:18.000Z | 2022-03-04T00:57:18.000Z | from tkinter import *
# Demo of the three pack() geometry options: fill, expand and side.
root = Tk()
root.geometry("300x400")
# fill=X stretches the button horizontally across the window width.
btn_fill = Button(root, text="Button fill")
btn_fill.pack(fill=X)
# expand=YES lets the button claim the remaining vertical space.
btn_expand = Button(root, text="Button expand ")
btn_expand.pack(expand=YES)
# side=LEFT anchors the button against the left edge.
btn_side = Button(root, text="Button side")
btn_side.pack(side=LEFT)
# Start the Tk event loop (blocks until the window is closed).
root.mainloop()
| 20.714286 | 48 | 0.737931 | from tkinter import *
# Demo of the three pack() geometry options: fill, expand and side.
root = Tk()
root.geometry("300x400")
# fill=X stretches the button horizontally across the window width.
btn_fill = Button(root, text="Button fill")
btn_fill.pack(fill=X)
# expand=YES lets the button claim the remaining vertical space.
btn_expand = Button(root, text="Button expand ")
btn_expand.pack(expand=YES)
# side=LEFT anchors the button against the left edge.
btn_side = Button(root, text="Button side")
btn_side.pack(side=LEFT)
# Start the Tk event loop (blocks until the window is closed).
root.mainloop()
| 0 | 0 | 0 |
3f633ce24739c5dbc18866b7e3bb35a835971566 | 1,201 | py | Python | mlfromscratch/examples/principal_component_analysis.py | leeh8911/ML-From-Scratch | 9b9c94e2f8fbbefa60d3481c23180f1852fae506 | [
"MIT"
] | 22,453 | 2017-02-17T08:19:27.000Z | 2022-03-31T17:45:01.000Z | mlfromscratch/examples/principal_component_analysis.py | oceanofinfinity/ML-From-Scratch | a2806c6732eee8d27762edd6d864e0c179d8e9e8 | [
"MIT"
] | 75 | 2017-02-25T23:55:40.000Z | 2022-03-28T04:15:08.000Z | mlfromscratch/examples/principal_component_analysis.py | oceanofinfinity/ML-From-Scratch | a2806c6732eee8d27762edd6d864e0c179d8e9e8 | [
"MIT"
] | 4,496 | 2017-02-25T16:52:39.000Z | 2022-03-31T06:42:54.000Z | from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
from mlfromscratch.unsupervised_learning import PCA
if __name__ == "__main__":
main() | 25.553191 | 75 | 0.647794 | from sklearn import datasets
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
import numpy as np
from mlfromscratch.unsupervised_learning import PCA
def main():
# Demo of how to reduce the dimensionality of the data to two dimension
# and plot the results.
# Load the dataset
data = datasets.load_digits()
X = data.data
y = data.target
# Project the data onto the 2 primary principal components
X_trans = PCA().transform(X, 2)
x1 = X_trans[:, 0]
x2 = X_trans[:, 1]
cmap = plt.get_cmap('viridis')
colors = [cmap(i) for i in np.linspace(0, 1, len(np.unique(y)))]
class_distr = []
# Plot the different class distributions
for i, l in enumerate(np.unique(y)):
_x1 = x1[y == l]
_x2 = x2[y == l]
_y = y[y == l]
class_distr.append(plt.scatter(_x1, _x2, color=colors[i]))
# Add a legend
plt.legend(class_distr, y, loc=1)
# Axis labels
plt.suptitle("PCA Dimensionality Reduction")
plt.title("Digit Dataset")
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.show()
if __name__ == "__main__":
main() | 944 | 0 | 23 |
7d63c887dcdd3eb590bba25cb8a74327940c5818 | 1,203 | py | Python | hubspot3/workflows.py | benaduggan/hubspot3 | 534af726d803ca1aa26b6f9ab90118649c52f26d | [
"MIT"
] | 137 | 2018-02-14T14:24:12.000Z | 2022-03-31T13:34:51.000Z | hubspot3/workflows.py | benaduggan/hubspot3 | 534af726d803ca1aa26b6f9ab90118649c52f26d | [
"MIT"
] | 105 | 2018-02-14T14:25:22.000Z | 2022-01-25T15:38:06.000Z | hubspot3/workflows.py | benaduggan/hubspot3 | 534af726d803ca1aa26b6f9ab90118649c52f26d | [
"MIT"
] | 80 | 2017-10-20T12:54:46.000Z | 2022-03-17T10:36:56.000Z | """
hubspot workflows api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
WORKFLOWS_API_VERSION = "3"
class WorkflowsClient(BaseClient):
    """
    The hubspot3 Workflows client uses the _make_request method to call the
    API for data. It returns a python object translated from the json returned
    """

    def __init__(self, *args, **kwargs):
        """initialize a workflows client"""
        super(WorkflowsClient, self).__init__(*args, **kwargs)
        self.log = get_log("hubspot3.workflows")

    def _get_path(self, subpath):
        # BaseClient routing hook: prefix every endpoint with the versioned
        # automation API root. Without this override requests cannot be
        # addressed to the workflows API.
        return f"automation/v{WORKFLOWS_API_VERSION}/{subpath}"

    def get_all_workflow_ids(self, **options):
        """
        Get all workflow IDs
        :see: https://developers.hubspot.com/docs/methods/workflows/v3/get_workflows
        """
        return self._call("workflows", **options)

    def get_workflow_by_id(self, workflow_id: int = None, **options):
        """
        Get workflow specified by ID
        :see: https://developers.hubspot.com/docs/methods/workflows/v3/get_workflow
        """
        if workflow_id is not None:
            return self._call(f"workflows/{workflow_id}")
        return None
| 30.846154 | 84 | 0.66251 | """
hubspot workflows api
"""
from hubspot3.base import BaseClient
from hubspot3.utils import get_log
WORKFLOWS_API_VERSION = "3"
class WorkflowsClient(BaseClient):
    """
    Client for the HubSpot Workflows API (v3).

    Relies on BaseClient._make_request to perform the HTTP calls and returns
    Python objects decoded from the JSON responses.
    """

    def __init__(self, *args, **kwargs):
        """Set up the workflows client and its dedicated logger."""
        super(WorkflowsClient, self).__init__(*args, **kwargs)
        self.log = get_log("hubspot3.workflows")

    def _get_path(self, subpath):
        """Prefix subpath with the versioned automation API root."""
        return "automation/v" + WORKFLOWS_API_VERSION + "/" + subpath

    def get_all_workflow_ids(self, **options):
        """
        Get all workflow IDs
        :see: https://developers.hubspot.com/docs/methods/workflows/v3/get_workflows
        """
        return self._call("workflows", **options)

    def get_workflow_by_id(self, workflow_id: int = None, **options):
        """
        Get workflow specified by ID
        :see: https://developers.hubspot.com/docs/methods/workflows/v3/get_workflow
        """
        # NOTE(review): options are accepted but not forwarded here — this
        # mirrors the existing behavior.
        if workflow_id is None:
            return None
        return self._call(f"workflows/{workflow_id}")
| 72 | 0 | 27 |
62082df5194bc16c82dca9b52aab3f614b00c4c0 | 42,439 | py | Python | tests/test_statsd.py | jsiembida/bucky3 | c3dbd3d9f62325417c9d11652ce43393f7475656 | [
"Apache-2.0"
] | 5 | 2018-08-17T16:12:10.000Z | 2022-02-03T15:30:33.000Z | tests/test_statsd.py | jsiembida/bucky3 | c3dbd3d9f62325417c9d11652ce43393f7475656 | [
"Apache-2.0"
] | 1 | 2017-09-18T23:05:41.000Z | 2017-09-19T04:54:23.000Z | tests/test_statsd.py | jsiembida/bucky3 | c3dbd3d9f62325417c9d11652ce43393f7475656 | [
"Apache-2.0"
] | 4 | 2017-09-15T16:11:57.000Z | 2019-09-19T11:58:50.000Z |
import os
import io
import sys
import time
import string
import random
import pstats
import unittest
import cProfile
import itertools
import statistics
from unittest.mock import patch, MagicMock
import bucky3.statsd as statsd
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 44.861522 | 120 | 0.612456 |
import os
import io
import sys
import time
import string
import random
import pstats
import unittest
import cProfile
import itertools
import statistics
from unittest.mock import patch, MagicMock
import bucky3.statsd as statsd
class RoughFloat(float):
    """Float subclass whose equality only compares to 2 decimal places.

    Non-float operands fall back to ordinary float equality semantics.
    """

    def __eq__(self, other):
        if isinstance(other, float):
            # Compare rounded values so tiny FP noise does not fail tests.
            return round(self, 2) == round(other, 2)
        return super().__eq__(other)
def statsd_verify(output_pipe, expected_values):
    """Check that the mocked pipe received exactly expected_values.

    Fails on any unexpected metric and on any expected metric that never
    arrived, then clears the mock for the next verification round.
    """
    received = []
    for call in output_pipe.send.call_args_list:
        received.extend(call[0][0])
    for item in received:
        assert item in expected_values, str(item) + " was not expected"
        expected_values.remove(item)
    if expected_values:
        assert False, "missing " + str(expected_values.pop())
    output_pipe.reset_mock()
def statsd_setup(timestamps, **extra_cfg):
    # Decorator for test methods: builds a StatsDServer with mocked system
    # and monotonic clocks fed from `timestamps`, runs the test with the
    # module, then ticks and verifies the expected flush output.
    # Usable both bare (@statsd_setup) and parametrized
    # (@statsd_setup(timestamps=..., extra=...)).
    def run(fun, self):
        with patch('time.monotonic') as monotonic_time, \
                patch('time.time') as system_time:
            # Duplicate the timestamp stream so both clocks advance in step.
            if callable(timestamps):
                system_time_mock, monotonic_time_mock = itertools.tee((t for t in timestamps()), 2)
            else:
                system_time_mock, monotonic_time_mock = itertools.tee(timestamps, 2)
            system_time_mock, monotonic_time_mock = iter(system_time_mock), iter(monotonic_time_mock)
            monotonic_time0 = next(monotonic_time_mock)
            # Statsd module consumes one monotonic tick for self.init_timestamp, we need to inject it
            monotonic_time_mock = itertools.chain(iter([monotonic_time0]), iter([monotonic_time0]), monotonic_time_mock)
            system_time.side_effect = system_time_mock
            monotonic_time.side_effect = monotonic_time_mock
            cfg = dict(
                # log_level=INFO triggers a log line in src module and that calls the mocked system_time
                # which consumes one tick and fails the tests. So up the log_level, really ugly.
                log_level='WARN',
                flush_interval=1,
                add_timestamps=True,
                timers_bucket="stats_timers",
                histograms_bucket="stats_histograms",
                sets_bucket="stats_sets",
                gauges_bucket="stats_gauges",
                counters_bucket="stats_counters",
                destination_modules=(),
            )
            cfg.update(**extra_cfg)
            output_pipe = MagicMock()
            statsd_module = statsd.StatsDServer('statsd_test', cfg, [output_pipe])
            statsd_module.init_cfg()
            # Run the actual test body; returning None means it verified
            # everything itself.
            expected_output = fun(self, statsd_module)
            if expected_output is None:
                return
            statsd_module.tick()
            statsd_verify(output_pipe, expected_output)
    if callable(timestamps):
        # Bare decorator usage: the "timestamps" argument is the test itself.
        fun = timestamps
        timestamps = None
        return lambda self: run(fun, self)
    else:
        def wrapper(fun):
            return lambda self: run(fun, self)
        return wrapper
def single_histogram_1_bucket(x):
    """Single-bucket selector: values below 300 map to 'under_300'.

    Returns None (no bucket) for 300 and above.
    """
    return 'under_300' if x < 300 else None
def single_histogram_3_buckets(x):
    """Classify x into one of three buckets: under_100 / under_300 / over_300."""
    if x < 100:
        return 'under_100'
    return 'under_300' if x < 300 else 'over_300'
def single_histogram_10_buckets(x):
    """Classify x into 100-wide buckets from 'under_100' up to 'over_900'."""
    for bound in range(100, 1000, 100):
        if x < bound:
            return 'under_%d' % bound
    return 'over_900'
def multiple_histogram_selector(key):
    """Return a per-metric bucket selector based on the metric name.

    Metrics named 'gorm' get a 2-bucket selector, 'gurm' a 3-bucket one;
    any other name yields None (no histogram).
    """
    def gorm_selector(x):
        return 'gorm_under_100' if x < 100 else 'gorm_over_100'

    def gurm_selector(x):
        if x < 300:
            return 'gurm_under_300'
        return 'gurm_under_1000' if x < 1000 else 'gurm_over_1000'

    return {'gorm': gorm_selector, 'gurm': gurm_selector}.get(key['name'])
class TestStatsDServer(unittest.TestCase):
    def malformed_entries(self, statsd_module, entry_type, check_numeric=True, check_rate=False):
        """Feed syntactically invalid packets of the given entry type and
        assert that nothing ever reaches the output pipe."""
        mock_pipe = statsd_module.dst_pipes[0]
        def test(s):
            # Each malformed packet must be dropped: no flush output at all.
            statsd_module.handle_packet(s.encode("utf-8"))
            statsd_module.tick()
            assert not mock_pipe.called
            assert not mock_pipe.send.called
            mock_pipe.reset_mock()
        test(":1|" + entry_type)  # missing metric name
        test("_gorm:1|" + entry_type)  # name starting with an underscore
        test("g.o.r.m:1|" + entry_type)  # dots are not allowed in names
        test("gorm:|" + entry_type)  # missing value
        if check_numeric:
            test("gorm:abc|" + entry_type)  # non-numeric value
        if check_rate:
            test("gorm:1|" + entry_type + "|@")  # empty sample rate
            test("gorm:1|" + entry_type + "|@0")  # zero rate is invalid
            test("gorm:1|" + entry_type + "|@1.1")  # rate above 1
            test("gorm:1|" + entry_type + "|@-0.3")  # negative rate
    def malformed_metadata(self, statsd_module, entry):
        """Fuzz the metadata (tag) section with illegal characters and assert
        that every such packet is rejected (nothing is flushed)."""
        mock_pipe = statsd_module.dst_pipes[0]
        legal_name_chars = string.ascii_letters
        # Tag names may contain underscores; ':' and '=' are structural and
        # ',' separates tags, so they are excluded from the illegal set here.
        illegal_name_chars = string.punctuation.replace('_', '').replace(':', '').replace('=', '')
        illegal_value_chars = ','
        legal_value_chars = ''.join(
            set(string.ascii_letters + string.punctuation + string.digits + ' ') - set(',')
        )
        def get_random_word(chars, min_len=1, max_len=5):
            # Random word built from the given alphabet.
            return ''.join(random.choice(chars) for i in range(random.randint(min_len, max_len)))
        def get_token(first_chars, legal_chars, illegal_char=None):
            # Token that optionally embeds one illegal character in the middle.
            n = get_random_word(first_chars, 1, 1) + get_random_word(legal_chars)
            if illegal_char:
                n = n + get_random_word(illegal_char, 1, 1) + get_random_word(legal_chars)
            return n
        i = 0
        # Illegal character in the tag name -> the whole line must be dropped.
        for c in illegal_name_chars:
            name = get_token(legal_name_chars, legal_name_chars, c)
            value = get_token(legal_value_chars, legal_value_chars)
            statsd_module.handle_line(i, entry + '|#' + name + '=' + value)
            statsd_module.tick()
            assert not mock_pipe.called, "Failed to k=" + name + " and v=" + value
            assert not mock_pipe.send.called, "Failed to k=" + name + " and v=" + value
            mock_pipe.reset_mock()
            i += 1
        # Illegal character in the tag value -> the whole line must be dropped.
        for c in illegal_value_chars:
            name = get_token(legal_name_chars, legal_name_chars)
            value = get_token(legal_value_chars, legal_value_chars, c)
            statsd_module.handle_line(i, entry + '|#' + name + '=' + value)
            statsd_module.tick()
            assert not mock_pipe.called
            assert not mock_pipe.send.called
            mock_pipe.reset_mock()
            i += 1
    def timestamped_metadata(self, statsd_module, entry):
        """Check handling of the special 'timestamp' tag: only numeric offsets
        within the 10 minute acceptance window should produce output."""
        mock_pipe = statsd_module.dst_pipes[0]
        def test(condition, s):
            # condition is the expected "something was flushed" outcome.
            statsd_module.handle_packet((entry + "|#timestamp=" + s).encode("ascii"))
            statsd_module.tick()
            assert not mock_pipe.called
            assert mock_pipe.send.called == condition
            mock_pipe.reset_mock()
        test(False, "")
        test(False, "not-a-timestamp")
        test(False, "-1000")  # Beyond 10min window
        test(False, "1000")  # Beyond 10min window
        test(True, "-123")  # Within 10min window
        test(True, "123.4")  # Within 10min window
    def bucketed_metadata(self, statsd_module, entry, expected_metadata_size=2):
        """Check the special 'bucket' tag: a valid bucket name redirects the
        metric into that bucket; invalid/empty names drop the metric."""
        mock_pipe = statsd_module.dst_pipes[0]
        def test(condition, s):
            # condition is the expected "something was flushed" outcome.
            statsd_module.handle_packet((entry + "|#hello=world,bucket=" + s).encode("ascii"))
            statsd_module.tick()
            assert not mock_pipe.called
            assert mock_pipe.send.called == condition
            if condition:
                args, kwargs = mock_pipe.send.call_args
                assert len(args) == 1
                payload = args[0]
                assert len(payload) == 1
                payload = payload[0]
                # The metric must be emitted under the requested bucket name.
                assert payload[0] == s
                assert len(payload[3]) == expected_metadata_size
            mock_pipe.reset_mock()
        test(False, "")
        test(False, "not-a-bucket-name")
        test(True, "valid_bucket_name")
    @statsd_setup(timestamps=(2, 4, 6, 8, 10, 12, 14))
    def test_counters(self, statsd_module):
        """Counters: values accumulate within a flush interval, sample rates
        scale the contribution, and idle counters stop being reported."""
        mock_pipe = statsd_module.dst_pipes[0]
        statsd_module.handle_line(0, "gorm:1.5|c")
        statsd_module.handle_line(0, "gurm:1|c|@0.1")
        statsd_module.handle_line(0, "gorm:3|c")
        statsd_module.handle_line(0, "gorm:0.5|c")
        statsd_module.handle_line(0, "form:10|c|@0.2")
        statsd_module.tick()
        # rate = count / flush interval; sampled values are scaled by 1/rate.
        statsd_verify(mock_pipe, [
            ('stats_counters', dict(rate=2.5, count=5), 2, dict(name='gorm')),
            ('stats_counters', dict(rate=5.0, count=10), 2, dict(name='gurm')),
            ('stats_counters', dict(rate=25.0, count=50), 2, dict(name='form'))
        ])
        statsd_module.handle_line(2, "gorm:1|c")
        statsd_module.handle_line(2, "gurm:1.3|c|@0.2")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_counters', dict(rate=0.5, count=1), 4, dict(name='gorm')),
            ('stats_counters', dict(rate=3.25, count=6.5), 4, dict(name='gurm'))
        ])
        statsd_module.handle_line(4, "gurm:3|c|@0.2")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_counters', dict(rate=7.5, count=15), 6, dict(name='gurm'))
        ])
        # A tick with no new samples must produce no output.
        statsd_module.tick()
        statsd_verify(mock_pipe, [])
    @statsd_setup(timestamps=range(1, 100))
    def test_counters_metadata(self, statsd_module):
        """Counter tags: tag order must not matter (a=b,c=5 aggregates with
        c=5,a=b), while different tag values make distinct series."""
        mock_pipe = statsd_module.dst_pipes[0]
        statsd_module.handle_line(0, "gorm:1.5|c")
        statsd_module.handle_line(0, "gorm:2.0|c|#a=b")
        statsd_module.handle_line(0, "gorm:2.5|c|#a=b,c=5")
        statsd_module.handle_line(0, "gorm:3.0|c|#a=z,c=5")
        statsd_module.handle_line(0, "gorm:3.5|c|#c=5,a=b")
        statsd_module.handle_line(0, "pi:3.14|c|#a=,b=c")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
            ('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='b')),
            ('stats_counters', dict(rate=6.0, count=6.0), 1, dict(name='gorm', a='b', c='5')),
            ('stats_counters', dict(rate=3.0, count=3.0), 1, dict(name='gorm', a='z', c='5')),
            ('stats_counters', dict(rate=3.14, count=3.14), 1, dict(name='pi', a='', b='c')),
        ])
        statsd_module.handle_line(1, "gorm:4.0|c|#c=5,a=z")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_counters', dict(rate=4.0, count=4.0), 2, dict(name='gorm', a='z', c='5')),
        ])
        statsd_module.tick()
        statsd_verify(mock_pipe, [])
    @statsd_setup(timestamps=range(1, 1000))
    def test_malformed_counters(self, statsd_module):
        # Counters must reject malformed names/values and bad sample rates.
        self.malformed_entries(statsd_module, 'c', check_rate=True)
    @statsd_setup(timestamps=range(1, 1000))
    def test_malformed_counters_metadata(self, statsd_module):
        # Counter metadata must reject illegal tag names and values.
        self.malformed_metadata(statsd_module, "gorm:1|c")
    @statsd_setup(timestamps=range(1, 1000))
    def test_timestamped_counters_metadata(self, statsd_module):
        # Counters must honor the 'timestamp' tag acceptance window.
        self.timestamped_metadata(statsd_module, "gorm:1|c")
    @statsd_setup(timestamps=range(1, 1000))
    def test_bucketed_counters_metadata(self, statsd_module):
        # Counters must honor the 'bucket' redirection tag.
        self.bucketed_metadata(statsd_module, "gorm:1|c")
    @statsd_setup(timestamps=(1, 2, 3, 4, 5, 6, 7, 8))
    def test_gauges(self, statsd_module):
        """Gauges: the last value within a flush interval wins, the sample
        rate suffix is ignored, and idle gauges are not re-reported."""
        mock_pipe = statsd_module.dst_pipes[0]
        statsd_module.handle_line(0, "gorm:6.7|g")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_gauges', dict(value=6.7), 1, dict(name='gorm'))
        ])
        statsd_module.handle_line(1, "gorm:3|g|@0.5")
        statsd_module.handle_line(1, "gorm:8.1|g")
        statsd_module.handle_line(1, "gurm:123|g|@0.2")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_gauges', dict(value=8.1), 2, dict(name='gorm')),
            ('stats_gauges', dict(value=123), 2, dict(name='gurm'))
        ])
        statsd_module.handle_line(2, "gurm:12|g|@0.5")
        statsd_module.tick()
        statsd_verify(mock_pipe, [
            ('stats_gauges', dict(value=12), 3, dict(name='gurm')),
        ])
        # A tick with no new samples must produce no output.
        statsd_module.tick()
        statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_gauges_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|g")
statsd_module.handle_line(0, "gorm:2.0|g|#a=b")
statsd_module.handle_line(0, "gorm:2.5|g|#a=b,c=5")
statsd_module.handle_line(0, "gorm:3.0|g|#a=z,c=5")
statsd_module.handle_line(0, "gorm:3.5|g|#c=5,a=b")
statsd_module.handle_line(0, "pi:3.14|g|#a=,b=c")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=1.5), 1, dict(name='gorm')),
('stats_gauges', dict(value=2.0), 1, dict(name='gorm', a='b')),
('stats_gauges', dict(value=3.5), 1, dict(name='gorm', a='b', c='5')),
('stats_gauges', dict(value=3.0), 1, dict(name='gorm', a='z', c='5')),
('stats_gauges', dict(value=3.14), 1, dict(name='pi', a='', b='c')),
])
statsd_module.handle_line(1, "gorm:4.0|g|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_gauges(self, statsd_module):
self.malformed_entries(statsd_module, 'g')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_gauges_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_gauges_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_gauges_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=(1, 2, 3, 4, 5, 6, 7, 8))
def test_sets(self, statsd_module):
    """Sets count distinct members per key per flush window; rates are ignored."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:abc|s|@0.2")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_sets', dict(count=1.0), 1, dict(name='gorm'))
    ])
    statsd_module.handle_line(1, "gurm:x|s")
    statsd_module.handle_line(1, "gurm:y|s|@0.2")
    statsd_module.handle_line(1, "gurm:z|s|@0.2")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_sets', dict(count=3.0), 2, dict(name='gurm'))
    ])
    # A duplicate member within the same window counts once.
    statsd_module.handle_line(2, "gurm:y|s|@0.2")
    statsd_module.handle_line(2, "gurm:y|s")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_sets', dict(count=1.0), 3, dict(name='gurm'))
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])

@statsd_setup(timestamps=range(1, 100))
def test_sets_metadata(self, statsd_module):
    """Sets with tag metadata count distinct members per (name, tag-set) key."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:p|s")
    statsd_module.handle_line(0, "gorm:q|s|#a=b")
    statsd_module.handle_line(0, "gorm:r|s|#a=b,c=5")
    statsd_module.handle_line(0, "gorm:s|s|#a=z,c=5")
    # Same tags as "a=b,c=5" in a different order -> same key, 2 distinct members.
    statsd_module.handle_line(0, "gorm:t|s|#c=5,a=b")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_sets', dict(count=1), 1, dict(name='gorm')),
        ('stats_sets', dict(count=1), 1, dict(name='gorm', a='b')),
        ('stats_sets', dict(count=2), 1, dict(name='gorm', a='b', c='5')),
        ('stats_sets', dict(count=1), 1, dict(name='gorm', a='z', c='5')),
    ])
    statsd_module.handle_line(1, "gorm:u|s|#c=5,a=z")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_sets', dict(count=1), 2, dict(name='gorm', a='z', c='5')),
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])

@statsd_setup(timestamps=range(1, 1000))
def test_malformed_sets(self, statsd_module):
    """Malformed set lines are rejected (set members need not be numeric)."""
    self.malformed_entries(statsd_module, 's', check_numeric=False)

@statsd_setup(timestamps=range(1, 1000))
def test_malformed_sets_metadata(self, statsd_module):
    """Malformed tag metadata on set lines is rejected."""
    self.malformed_metadata(statsd_module, "gorm:x|s")

@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_sets_metadata(self, statsd_module):
    """Sets honour explicit timestamps carried in metadata."""
    self.timestamped_metadata(statsd_module, "gorm:x|s")

@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_sets_metadata(self, statsd_module):
    """Sets honour bucket names carried in metadata."""
    self.bucketed_metadata(statsd_module, "gorm:x|s")
@statsd_setup(flush_interval=0.1,
              percentile_thresholds=(90,),
              timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7))
def test_single_timer_sample(self, statsd_module):
    """A lone timer sample: mean == upper == lower, no stdev entry emitted."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:100|ms")
    expected_value = {
        "mean": 100.0,
        "upper": 100.0,
        "lower": 100.0,
        "count": 1,
        # count / flush_interval = 1 / 0.1
        "count_ps": 10.0,
    }
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 0.1, dict(name='gorm', percentile='90.0'))
    ])
    statsd_module.handle_line(0.1, "gorm:100|ms")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 0.2, dict(name='gorm', percentile='90.0'))
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])

@statsd_setup(flush_interval=0.1,
              percentile_thresholds=(90,),
              timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7))
def test_timer_samples1(self, statsd_module):
    """The 90th-percentile cut drops the top sample; rates don't inflate count."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:100|ms")
    statsd_module.handle_line(0, "gorm:200|ms|@0.2")
    statsd_module.handle_line(0, "gorm:300|ms")  # Out of the 90% threshold
    expected_value = {
        "mean": 150,
        "lower": 100,
        "upper": 200,
        "count": 2,
        "count_ps": 20,
        "stdev": 70.71067811865476
    }
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 0.1, dict(name='gorm', percentile='90.0'))
    ])

@statsd_setup(percentile_thresholds=(90,),
              timestamps=(0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
def test_timer_samples2(self, statsd_module):
    """Nine identical samples survive the 90% cut; the single outlier is dropped."""
    mock_pipe = statsd_module.dst_pipes[0]
    for i in range(9):
        statsd_module.handle_line(0, "gorm:1|ms")
    statsd_module.handle_line(0, "gorm:2|ms")  # Out of the 90% threshold
    expected_value = {
        "mean": 1,
        "lower": 1,
        "upper": 1,
        "count": 9,
        # count / flush_interval = 9 / 0.5
        "count_ps": 18.0,
        "stdev": 0.0
    }
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 0.5, dict(name='gorm', percentile='90.0'))
    ])

@statsd_setup(percentile_thresholds=(90,),
              timestamps=(0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
def test_timer_samples3(self, statsd_module):
    """90% of 4 sorted samples [2, 3, 5, 7] keeps the lowest three."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:2|ms")
    statsd_module.handle_line(0, "gorm:5|ms")
    statsd_module.handle_line(0, "gorm:7|ms")  # Out of the 90% threshold
    statsd_module.handle_line(0, "gorm:3|ms")
    expected_value = {
        "mean": 10 / 3.0,
        "lower": 2,
        "upper": 5,
        "count": 3,
        "count_ps": 6,
        "stdev": 1.5275252316519463
    }
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 0.5, dict(name='gorm', percentile='90.0'))
    ])
# Shared by the large-series test and its expected-value loop so they stay in sync.
_percentile_thresholds = (10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 97, 98, 99, 99.9, 100)

@statsd_setup(timestamps=range(1, 100), percentile_thresholds=_percentile_thresholds)
def test_timer_large_series(self, statsd_module):
    """Cross-check every percentile aggregate against the statistics module."""
    test_name = 'gorm'
    test_vector = self.rand_vec(length=3000)
    for sample in test_vector:
        statsd_module.handle_line(0, test_name + ":" + str(sample) + "|ms")
    statsd_module.tick()
    test_vector.sort()
    expected_values = []
    for threshold_v in self._percentile_thresholds:
        # Index of the percentile cut; 100% keeps the whole vector.
        threshold_i = len(test_vector) if threshold_v == 100 else (threshold_v * len(test_vector)) // 100
        threshold_slice = test_vector[:int(threshold_i)]
        expected_value = {
            # RoughFloat tolerates floating-point rounding differences.
            "mean": RoughFloat(statistics.mean(threshold_slice)),
            "upper": RoughFloat(max(threshold_slice)),
            "lower": RoughFloat(min(threshold_slice)),
            "count": len(threshold_slice),
            "count_ps": len(threshold_slice),
            "stdev": RoughFloat(statistics.stdev(threshold_slice))
        }
        expected_values.append(('stats_timers', expected_value, 1,
                                dict(name=test_name, percentile=str(float(threshold_v)))))
    statsd_verify(statsd_module.dst_pipes[0], expected_values)

@statsd_setup(timestamps=range(1, 100), percentile_thresholds=(100,))
def test_timers_metadata(self, statsd_module):
    """Timers with tag metadata aggregate per unique (name, tag-set) key."""
    mock_pipe = statsd_module.dst_pipes[0]
    expected_value = {
        "mean": 100.0,
        "upper": 100.0,
        "lower": 100.0,
        "count": 1,
        "count_ps": 1.0,
    }
    # The "a=b,c=5" key receives two samples (order-insensitive tag match).
    expected_value2 = expected_value.copy()
    expected_value2.update(count=2, count_ps=2.0, stdev=0.0)
    statsd_module.handle_line(0, "gorm:100|ms")
    statsd_module.handle_line(0, "gorm:100|ms|#a=b")
    statsd_module.handle_line(0, "gorm:100|ms|#a=b,c=5")
    statsd_module.handle_line(0, "gorm:100|ms|#a=z,c=5")
    statsd_module.handle_line(0, "gorm:100|ms|#c=5,a=b")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 1, dict(name='gorm', percentile='100.0')),
        ('stats_timers', expected_value, 1, dict(name='gorm', a='b', percentile='100.0')),
        ('stats_timers', expected_value2, 1, dict(name='gorm', a='b', c='5', percentile='100.0')),
        ('stats_timers', expected_value, 1, dict(name='gorm', a='z', c='5', percentile='100.0')),
    ])
    statsd_module.handle_line(1, "gorm:100|ms|#a=b,c=5")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_timers', expected_value, 2, dict(name='gorm', a='b', c='5', percentile='100.0')),
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])

@statsd_setup(timestamps=range(1, 1000))
def test_malformed_timers(self, statsd_module):
    """Malformed timer lines are rejected."""
    self.malformed_entries(statsd_module, 'ms')

@statsd_setup(timestamps=range(1, 1000))
def test_malformed_timers_metadata(self, statsd_module):
    """Malformed tag metadata on timer lines is rejected."""
    self.malformed_metadata(statsd_module, "gorm:1|ms")

@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,))
def test_timestamped_timers_metadata(self, statsd_module):
    """Timers honour explicit timestamps carried in metadata."""
    self.timestamped_metadata(statsd_module, "gorm:1|ms")

@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,))
def test_bucketed_timers_metadata(self, statsd_module):
    """Timers honour bucket metadata; extra key for the percentile tag."""
    self.bucketed_metadata(statsd_module, "gorm:1|ms", expected_metadata_size=3)
@statsd_setup(flush_interval=0.1,
              timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
              histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_histogram_samples1(self, statsd_module):
    """A single histogram sample reports basic aggregates under its bucket tag."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:100|h")
    expected_value = {
        "mean": 100,
        "lower": 100,
        "upper": 100,
        "count": 1,
        # count / flush_interval = 1 / 0.1
        "count_ps": 10,
    }
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_histograms', expected_value, 0.1, dict(name='gorm', histogram='test_histogram'))
    ])

@statsd_setup(flush_interval=0.1,
              timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
              histogram_selector=lambda key: lambda x: 'test_histogram', )
def test_histogram_samples2(self, statsd_module):
    """Histograms keep all samples (no percentile cut) and ignore sample rates."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:100|h")
    statsd_module.handle_line(0, "gorm:200|h|@0.2")
    statsd_module.handle_line(0, "gorm:300|h")
    expected_value = {
        "mean": 200,
        "lower": 100,
        "upper": 300,
        "count": 3,
        "count_ps": 30,
        "stdev": 100.0
    }
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_histograms', expected_value, 0.1, dict(name='gorm', histogram='test_histogram'))
    ])

@statsd_setup(flush_interval=0.1,
              timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
              histogram_selector=multiple_histogram_selector)
def test_histogram_large_series(self, statsd_module):
    """Cross-check per-bucket histogram aggregates against the statistics module.

    NOTE(review): this block's indentation was mangled upstream; the `break`
    below (stopping after the first bucketed sample) looks wrong for a "large
    series" test and suggests a lost inner retry loop — verify against the
    original source before relying on this placement.
    """
    mock_pipe = statsd_module.dst_pipes[0]
    test_samples = dict(gorm={}, gurm={}, foo={})
    for i in range(3000):
        name = random.choice(tuple(test_samples.keys()))
        value = random.randint(0, 1500)
        statsd_module.handle_line(0, name + ":" + str(value) + "|h")
        selector = multiple_histogram_selector(dict(name=name))
        if not selector:
            # Metric not histogrammed at all -> no expectation recorded.
            continue
        bucket = selector(value)
        if bucket:
            test_samples[name].setdefault(bucket, []).append(value)
            break
    expected_values = []
    for name, d in test_samples.items():
        for k, v in d.items():
            expected_value = {
                "mean": RoughFloat(statistics.mean(v)),
                "lower": min(v),
                "upper": max(v),
                "count": len(v),
                "count_ps": len(v) * 10,
            }
            # stdev is only emitted for two or more samples.
            if len(v) > 1:
                expected_value['stdev'] = RoughFloat(statistics.stdev(v))
            expected_values.append(
                ('stats_histograms', expected_value, 0.1, dict(name=name, histogram=k))
            )
    statsd_module.tick()
    statsd_verify(mock_pipe, expected_values)

@statsd_setup(timestamps=range(1, 1000))
def test_malformed_histograms(self, statsd_module):
    """Malformed histogram lines are rejected."""
    self.malformed_entries(statsd_module, 'h')

@statsd_setup(timestamps=range(1, 1000))
def test_malformed_histograms_metadata(self, statsd_module):
    """Malformed tag metadata on histogram lines is rejected."""
    self.malformed_metadata(statsd_module, "gorm:1|h")

@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,),
              histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_timestamped_histograms_metadata(self, statsd_module):
    """Histograms honour explicit timestamps carried in metadata."""
    self.timestamped_metadata(statsd_module, "gorm:1|h")

@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(),
              histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_bucketed_histograms_metadata(self, statsd_module):
    """Histograms honour bucket metadata; extra key for the histogram tag."""
    self.bucketed_metadata(statsd_module, "gorm:1|h", expected_metadata_size=3)
@statsd_setup(timestamps=range(1, 1000))
def test_commas(self, statsd_module):
    """Stray and duplicate commas in the tag section are tolerated.

    NOTE(review): only the bare hello=world key appears in the expected list;
    the second line carries more=metadata and so maps to a different key —
    confirm whether lines with dangling commas are dropped entirely.
    """
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "foo:1|c|#hello=world,")
    statsd_module.handle_line(0, "foo:1|c|#hello=world,,,more=metadata,")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_counters', dict(rate=1, count=1), 1, dict(name='foo', hello='world')),
    ])
def prepare_performance_test(self):
    """Skip the test unless TEST_PERFORMANCE is set truthy in the environment.

    Returns a cProfile.Profile when PROFILE_PERFORMANCE is also set truthy,
    otherwise None.
    """
    truthy = ('yes', 'true', '1')
    if os.environ.get('TEST_PERFORMANCE', 'no').lower() not in truthy:
        self.skipTest("Performance test not requested")
        return None
    if os.environ.get('PROFILE_PERFORMANCE', 'no').lower() in truthy:
        return cProfile.Profile()
    return None
def close_performance_test(self, profiler):
    """Print the top 10% of cumulative-time profile entries, if profiling was on."""
    if not profiler:
        return
    report = io.StringIO()
    pstats.Stats(profiler, stream=report).sort_stats('cumulative').print_stats(0.1)
    print(report.getvalue())
def rand_str(self, min_len=3, max_len=10, chars=string.ascii_lowercase):
    """Return a random string of min_len..max_len characters drawn from chars."""
    length = random.randint(min_len, max_len)
    picked = [random.choice(chars) for _ in range(length)]
    return ''.join(picked)
def rand_num(self, min_len=1, max_len=3):
    """Return a random digit string whose length is between min_len and max_len."""
    return self.rand_str(min_len=min_len, max_len=max_len, chars=string.digits)
def rand_val(self, mean=None):
    """Return a Gaussian sample around mean (default 10), clamped to [0, 2*mean],
    rounded to three decimal places."""
    if mean is None:
        mean = 10
    sample = random.gauss(mean, mean / 10)
    clamped = max(0, min(sample, 2 * mean))
    return round(clamped, 3)
def rand_vec(self, length=None, mean=None):
    """Return a list of `length` random values (length drawn from 10..100 if omitted)."""
    if length is None:
        length = random.randint(10, 100)
    return [self.rand_val(mean) for _ in range(length)]
def metadata_test_set(self, metric_type, set_size, tags_per_sample):
    """Build `set_size` unique random statsd lines of `metric_type`.

    Each line gets `tags_per_sample` random key=value tags and, with ~50%
    probability, a one-decimal sample rate suffix. Uniqueness is enforced by
    accumulating into a set until it reaches the requested size.
    """
    buf = set()
    while len(buf) < set_size:
        if tags_per_sample > 0:
            tags_str = ','.join(self.rand_str() + '=' + self.rand_str() for i in range(tags_per_sample))
        else:
            tags_str = ''
        l = self.rand_str() + ':' + self.rand_num() + '|' + metric_type
        if random.random() > 0.5:
            l = l + '|@{:.1f}'.format(random.random())
        if tags_str:
            l = l + '|#' + tags_str
        buf.add(l)
    return buf
def metadata_performance(self, statsd_module, prefix, metric_type, N, M, set_size, tags_per_sample, profiler=None):
    """Benchmark line insertion and tick aggregation over N rounds.

    Each round feeds the same `set_size` unique lines M times, then ticks once.
    Prints mean microseconds per sample (insertion and aggregation separately)
    to stderr; the optional profiler is enabled only around the measured calls.
    """
    mock_pipe = statsd_module.dst_pipes[0]
    test_sample_set = self.metadata_test_set(metric_type, set_size, tags_per_sample)
    insertion_time = 0
    aggregation_time = 0
    t = 0
    for i in range(N):
        start_timestamp = time.process_time()
        for j in range(M):
            for sample in test_sample_set:
                if profiler:
                    profiler.enable()
                statsd_module.handle_line(t, sample)
                if profiler:
                    profiler.disable()
        insertion_timestamp = time.process_time()
        if profiler:
            profiler.enable()
        statsd_module.tick()
        if profiler:
            profiler.disable()
        aggregation_timestamp = time.process_time()
        t += 1
        # Discard whatever the tick flushed so mock memory stays bounded.
        mock_pipe.reset_mock()
        insertion_time += (insertion_timestamp - start_timestamp)
        aggregation_time += (aggregation_timestamp - insertion_timestamp)
    total_samples = N * M * len(test_sample_set)
    us_per_insertion = 1000000 * insertion_time / total_samples
    us_per_aggregation = 1000000 * aggregation_time / total_samples
    print(('\n{prefix}: {total_samples:d} samples in {total_time:.2f}s'
           ' -> insertion {us_per_insertion:.2f}us/sample'
           ' -> aggregation {us_per_aggregation:.2f}us/sample').format(
        prefix=prefix, total_samples=total_samples, total_time=(insertion_time + aggregation_time),
        us_per_insertion=us_per_insertion, us_per_aggregation=us_per_aggregation,
    ), flush=True, file=sys.stderr)
# The performance tests below only run when TEST_PERFORMANCE is set (see
# prepare_performance_test); each exercises one metric type at 0/3/10 tags.
@statsd_setup(timestamps=range(1, 10000000))
def test_counters_performance(self, statsd_module):
    """Benchmark counter insertion/aggregation at varying tag counts."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "counters without tags", 'c', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "counters with 3 tags", 'c', 100, 10, 1000, 3, prof)
    self.metadata_performance(statsd_module, "counters with 10 tags", 'c', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000))
def test_gauges_performance(self, statsd_module):
    """Benchmark gauge insertion/aggregation at varying tag counts."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "gauges without tags", 'g', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "gauges with 3 tags", 'g', 100, 10, 1000, 3, prof)
    self.metadata_performance(statsd_module, "gauges with 10 tags", 'g', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000))
def test_sets_performance(self, statsd_module):
    """Benchmark set insertion/aggregation at varying tag counts."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "sets without tags", 's', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "sets with 3 tags", 's', 100, 10, 1000, 3, prof)
    self.metadata_performance(statsd_module, "sets with 10 tags", 's', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99))
def test_timers_performance(self, statsd_module):
    """Benchmark timer insertion/aggregation at varying tag counts."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "timers without tags", 'ms', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "timers with 3 tags", 'ms', 100, 10, 1000, 3, prof)
    self.metadata_performance(statsd_module, "timers with 10 tags", 'ms', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
              histogram_selector=lambda key: single_histogram_1_bucket)
def test_histograms_performance1(self, statsd_module):
    """Benchmark 1-bucket histograms at 0 and 10 tags."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "histogram with 1 bucket, no tags", 'h', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "histogram with 1 bucket, 10 tags", 'h', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
              histogram_selector=lambda key: single_histogram_3_buckets)
def test_histograms_performance3(self, statsd_module):
    """Benchmark 3-bucket histograms at 0 and 10 tags."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "histogram with 3 buckets, no tags", 'h', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "histogram with 3 buckets, 10 tags", 'h', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
              histogram_selector=lambda key: single_histogram_10_buckets)
def test_histograms_performance10(self, statsd_module):
    """Benchmark 10-bucket histograms at 0 and 10 tags."""
    prof = self.prepare_performance_test()
    self.metadata_performance(statsd_module, "histogram with 10 buckets, no tags", 'h', 100, 10, 1000, 0, prof)
    self.metadata_performance(statsd_module, "histogram with 10 buckets, 10 tags", 'h', 100, 10, 1000, 10, prof)
    self.close_performance_test(prof)
def percentile_test_set(self, length, N=1):
    """Return N (key, vector) pairs for the percentile benchmarks.

    The key is a metadata tuple of the form (('name', <random 10 chars>),);
    the vector holds `length` random samples.
    """
    buf = []
    for i in range(N):
        name = ('name', self.rand_str(min_len=10, max_len=10))
        vector = self.rand_vec(length=length)
        buf.append((tuple((name,),), vector))
    return buf
def percentiles_performance(self, statsd_module, prefix, vector_len, N, M, profiler=None):
    """Benchmark enqueue_timers over M rounds of N timer vectors of vector_len samples.

    Prints the mean microseconds per sample to stderr. Fix: the report now shows
    the total elapsed time across all M rounds; the original printed only the
    last round's time next to the all-round sample count, which was misleading.
    """
    total_time, test_set = 0, self.percentile_test_set(vector_len, N)

    def discard(bucket, stats, timestamp, metadata):
        # Swallow flushed metrics; we only measure aggregation cost.
        return None

    for i in range(M):
        statsd_module.buffer_metric = discard
        # Re-seed the timers state for every round so each one does full work.
        statsd_module.timers.clear()
        statsd_module.timers.update((k, (8, v)) for k, v in test_set)
        statsd_module.last_timestamp = 0
        statsd_module.current_timestamp = 10
        start_time = time.process_time()
        if profiler:
            profiler.enable()
        statsd_module.enqueue_timers(10)
        if profiler:
            profiler.disable()
        total_time += time.process_time() - start_time
    total_samples = N * M * vector_len
    us_per_sample = 1000000 * total_time / total_samples
    print('\n{prefix}: {total_samples:d} samples in {total_time:.2f}s -> {us_per_sample:.1f}us/sample'.format(
        prefix=prefix, total_samples=total_samples, total_time=total_time, us_per_sample=us_per_sample
    ), flush=True, file=sys.stderr)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90,))
def test_1percentile_performance(self, statsd_module):
    """Benchmark one percentile threshold over varying vector-count/size mixes."""
    prof = self.prepare_performance_test()
    self.percentiles_performance(statsd_module, "1 percentile, 10000 vectors of 10 samples", 10, 10000, 10, prof)
    self.percentiles_performance(statsd_module, "1 percentile, 1000 vectors of 100 samples", 100, 1000, 10, prof)
    self.percentiles_performance(statsd_module, "1 percentile, 100 vectors of 1000 samples", 1000, 100, 10, prof)
    self.percentiles_performance(statsd_module, "1 percentile, 10 vectors of 10000 samples", 10000, 10, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(50, 90, 99))
def test_3percentiles_performance(self, statsd_module):
    """Benchmark three percentile thresholds over the same mixes."""
    prof = self.prepare_performance_test()
    self.percentiles_performance(statsd_module, "3 percentiles, 10000 vectors of 10 samples", 10, 10000, 10, prof)
    self.percentiles_performance(statsd_module, "3 percentiles, 1000 vectors of 100 samples", 100, 1000, 10, prof)
    self.percentiles_performance(statsd_module, "3 percentiles, 100 vectors of 1000 samples", 1000, 100, 10, prof)
    self.percentiles_performance(statsd_module, "3 percentiles, 10 vectors of 10000 samples", 10000, 10, 10, prof)
    self.close_performance_test(prof)

@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(10, 20, 30, 40, 50, 60, 70, 80, 90, 100))
def test_10percentiles_performance(self, statsd_module):
    """Benchmark ten percentile thresholds over the same mixes."""
    prof = self.prepare_performance_test()
    self.percentiles_performance(statsd_module, "10 percentiles, 10000 vectors of 10 samples", 10, 10000, 10, prof)
    self.percentiles_performance(statsd_module, "10 percentiles, 1000 vectors of 100 samples", 100, 1000, 10, prof)
    self.percentiles_performance(statsd_module, "10 percentiles, 100 vectors of 1000 samples", 1000, 100, 10, prof)
    self.percentiles_performance(statsd_module, "10 percentiles, 10 vectors of 10000 samples", 10000, 10, 10, prof)
    self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 100))
def test_datadog_metadata(self, statsd_module):
    """Datadog-style colon tags (#a:b) parse the same as a=b tags."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:1.5|c")
    statsd_module.handle_line(0, "gorm:2.0|c|#a:b")
    statsd_module.handle_line(0, "gorm:2.5|c|#a:b,c:5")
    statsd_module.handle_line(0, "gorm:3.0|c|#a:z,c:5")
    # Same tags as "a:b,c:5" in a different order -> same key (2.5 + 3.5 = 6.0).
    statsd_module.handle_line(0, "gorm:3.5|c|#c:5,a:b")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
        ('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='b')),
        ('stats_counters', dict(rate=6.0, count=6.0), 1, dict(name='gorm', a='b', c='5')),
        ('stats_counters', dict(rate=3.0, count=3.0), 1, dict(name='gorm', a='z', c='5')),
    ])
    statsd_module.handle_line(1, "gorm:4.0|c|#c:5,a:z")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_counters', dict(rate=4.0, count=4.0), 2, dict(name='gorm', a='z', c='5')),
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])

@statsd_setup(timestamps=range(1, 100))
def test_escaped_metadata(self, statsd_module):
    """Backslash escaping in tag values: escaped commas stay literal."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:1.5|c")
    statsd_module.handle_line(0, "gorm:2.0|c|#a=bcd")
    # Backslash before an ordinary char is dropped -> a='bcd', merges with the
    # line above (2.0 + 2.5 = 4.5).
    statsd_module.handle_line(0, r"gorm:2.5|c|#a=b\c\d")
    # Escaped comma is kept literal inside the value -> a='b,c=d'.
    statsd_module.handle_line(0, r"gorm:3.5|c|#a=b\,c=d")
    # First comma escaped, second splits -> a='b,' and c='d'.
    statsd_module.handle_line(0, r"gorm:5.5|c|#a=b\,,c=d")
    # Backslash-n decodes to a newline character in the tag value.
    statsd_module.handle_line(0, r"gorm:7.5|c|#a=b\nc,d=e")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
        ('stats_counters', dict(rate=4.5, count=4.5), 1, dict(name='gorm', a='bcd')),
        ('stats_counters', dict(rate=3.5, count=3.5), 1, dict(name='gorm', a='b,c=d')),
        ('stats_counters', dict(rate=5.5, count=5.5), 1, dict(name='gorm', a='b,', c='d')),
        ('stats_counters', dict(rate=7.5, count=7.5), 1, dict(name='gorm', a='b\nc', d='e')),
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])

@statsd_setup(timestamps=range(1, 100))
def test_case_sensitivity(self, statsd_module):
    """Metric names, tag keys, and tag values are all case-sensitive."""
    mock_pipe = statsd_module.dst_pipes[0]
    statsd_module.handle_line(0, "gorm:2.0|c|#a=bcd")
    statsd_module.handle_line(0, "goRM:1.0|c|#a=BCD")
    statsd_module.handle_line(0, "gorm:2.5|c|#A=bcd")
    statsd_module.handle_line(0, "gorm:3.5|c|#a=Bcd")
    statsd_module.tick()
    statsd_verify(mock_pipe, [
        ('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='bcd')),
        ('stats_counters', dict(rate=1.0, count=1.0), 1, dict(name='goRM', a='BCD')),
        ('stats_counters', dict(rate=2.5, count=2.5), 1, dict(name='gorm', A='bcd')),
        ('stats_counters', dict(rate=3.5, count=3.5), 1, dict(name='gorm', a='Bcd')),
    ])
    statsd_module.tick()
    statsd_verify(mock_pipe, [])
if __name__ == '__main__':
unittest.main()
| 36,284 | 5,660 | 210 |
932f98d94c426702b3a7fbbdcaf125c433d73654 | 314 | py | Python | python/collections/q4_py_collections_ordereddict.py | mxdzi/hackerrank | 4455f73e4479a4204b2e1167253f6a02351aa5b7 | [
"MIT"
] | null | null | null | python/collections/q4_py_collections_ordereddict.py | mxdzi/hackerrank | 4455f73e4479a4204b2e1167253f6a02351aa5b7 | [
"MIT"
] | null | null | null | python/collections/q4_py_collections_ordereddict.py | mxdzi/hackerrank | 4455f73e4479a4204b2e1167253f6a02351aa5b7 | [
"MIT"
] | null | null | null | from collections import OrderedDict
if __name__ == '__main__':
main()
| 20.933333 | 53 | 0.589172 | from collections import OrderedDict
def main():
items = OrderedDict()
for _ in range(int(input())):
name, price = input().rsplit(' ', 1)
items[name] = items.get(name, 0) + int(price)
for name, price in items.items():
print(name, price)
if __name__ == '__main__':
main()
| 214 | 0 | 23 |
84650fca7b37776be522689b53cf77fe4aa94b47 | 1,584 | py | Python | venv/lib/python2.7/site-packages/astroid/test/unittest_python3.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/astroid/test/unittest_python3.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | 7 | 2021-02-08T20:22:15.000Z | 2022-03-11T23:19:41.000Z | venv/lib/python2.7/site-packages/astroid/test/unittest_python3.py | mutaihillary/mycalculator | 55685dd7c968861f18ae0701129f5af2bc682d67 | [
"MIT"
] | null | null | null | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
import sys
from logilab.common.testlib import TestCase, unittest_main, require_version
from astroid.node_classes import Assign
from astroid.manager import AstroidManager
from astroid.builder import AstroidBuilder
if __name__ == '__main__':
unittest_main()
| 36 | 85 | 0.731692 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
import sys
from logilab.common.testlib import TestCase, unittest_main, require_version
from astroid.node_classes import Assign
from astroid.manager import AstroidManager
from astroid.builder import AstroidBuilder
class Python3TC(TestCase):
    """Python 3 syntax handling tests for the astroid AST builder."""
    def setUp(self):
        # Fresh manager/builder per test; clear the shared module cache so
        # earlier tests cannot leak parsed modules into this one.
        self.manager = AstroidManager()
        self.builder = AstroidBuilder(self.manager)
        self.manager.astroid_cache.clear()

    @require_version('3.0')
    def test_starred_notation(self):
        """A starred target in `*a, b = ...` should report an Assign ass_type."""
        astroid = self.builder.string_build("*a, b = [1, 2, 3]", 'test', 'test')
        # Get the star node
        node = next(next(next(astroid.get_children()).get_children()).get_children())
        self.assertTrue(isinstance(node.ass_type(), Assign))
if __name__ == '__main__':
unittest_main()
| 399 | 86 | 23 |
a61790a04606036ab55bd68b11ead0555b7b1d88 | 7,438 | py | Python | shop_visitors/main.py | muhis/Shop-visitor-Mixpannel-workshop | b0fd2b31e7034b673f8f45e686b6340af3ae2f43 | [
"MIT"
] | null | null | null | shop_visitors/main.py | muhis/Shop-visitor-Mixpannel-workshop | b0fd2b31e7034b673f8f45e686b6340af3ae2f43 | [
"MIT"
] | null | null | null | shop_visitors/main.py | muhis/Shop-visitor-Mixpannel-workshop | b0fd2b31e7034b673f8f45e686b6340af3ae2f43 | [
"MIT"
] | null | null | null | from weighted_random import random_choice
import requests
import random
import uuid
import json
import logging
from ipaddress import IPv4Address, AddressValueError
from mixpanel import Mixpanel
from constants import *
from typing import List, ClassVar, Any, Optional
import sys
import threading
from random_user import generate_random_user_properties
# Logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def init_mixpannel_clients(mxp_tokens: List[str]) -> List[Mixpanel]:
    """
    Return a list of Mixpanel clients, one per project token.
    """
    projects: List[Mixpanel] = []
    logger.info('Found %s Mixpannel tokens.', len(mxp_tokens))
    for project_token in mxp_tokens:
        mp = Mixpanel(project_token)
        projects.append(mp)
    logger.info('%s Mixpannel projects ready to go.', len(projects))
    return projects
MXP_PROJECTS = init_mixpannel_clients(mxp_tokens=MIXPANNEL_TOKENS)
def generate_random_ip() ->str:
    """
    Generate a random, public-looking IPv4 address string.

    Adapted from
    https://codereview.stackexchange.com/questions/200337/random-ip-address-generator
    with changes so the result is never a private or reserved address.

    Fixes over the original: `trials` now counts attempts across the whole
    search (it was reset every iteration, so the log always said 1), and the
    accept condition requires the address to be neither private nor reserved
    (the original `not a or not b` accepted nearly every address).
    """
    # Module logger is the root logger; resolve it here so the function is
    # self-contained.
    log = logging.getLogger()
    trials: int = 0
    while (True):
        try:
            trials += 1
            # generates an integer with 32 random bits
            bits = random.getrandbits(32)
            # instances an IPv4Address object from those bits
            addr = IPv4Address(bits)
        except AddressValueError:
            continue
        # Accept only addresses that are neither private nor reserved.
        if not addr.is_private and not addr.is_reserved:
            break
    ip_address = str(addr)
    log.info('Generated %s IP address after %s attempt', ip_address, trials)
    return ip_address
class User(BaseShopper):
    """
    A registered customer.
    """
    # NOTE(review): the method that followed this decorator is missing — it was
    # stripped during extraction, leaving the class syntactically broken.
    # Restore the original classmethod before using this module.
    @classmethod
# NOTE(review): module-level pool of registered users; presumably populated by
# the missing User classmethod above — confirm against the original source.
users_pool: List[User] = []
class Visit(object):
    """
    Simple customer of the website. This might be a registered user or a random unregistered user.
    """
    # NOTE(review): several methods (e.g. __init__, _add_item_to_cart, the
    # checkout/drop steps) are missing here — stripped during extraction.
    # `random_bool` is also not defined anywhere in the visible file; restore
    # both from the original source before running.
    # Class-level mutable attributes: as written these lists are shared by all
    # Visit instances.
    user_journy: List[str] = []
    user_cart: List[str] = []
    def choose_requester(self) -> BaseShopper:
        """
        Return a Shopper object
        """
        self.is_registered = random_bool()
        requester: BaseShopper
        if self.is_registered and users_pool:
            # Reuse an existing registered user picked at random.
            requester = random.choice(users_pool)  # type: ignore
        else:
            requester = UnregisteredShopper()
        return requester
    def _visit_main_page(self):
        """
        In main page, the user might visit an item page or drop.
        """
        self.requester.visit('main page')
        self._visit_item_page()
    def _visit_item_page(self):
        """
        In an item page, users can:
        1. Add the item into the cart.
        2. Return to main page.
        3. Drop.
        """
        # 70/30 weighted coin: does the visitor engage with an item?
        requester_progressed = random_choice([(True, 70), (False, 30)])
        if requester_progressed:
            product = random_choice(SHOP_PRODUCTS)
            self.requester.visit(
                'Visit item page',
                extra={
                    'item name': product
                }
            )
            self._add_item_to_cart(product)
        else:
            requester_progressed = random_bool()
            if requester_progressed:
                # Let us assume that they need to go to home page.
                self._visit_main_page()
if __name__ == '__main__':
start_script()
| 30.483607 | 98 | 0.616564 | from weighted_random import random_choice
import requests
import random
import uuid
import json
import logging
from ipaddress import IPv4Address, AddressValueError
from mixpanel import Mixpanel
from constants import *
from typing import List, ClassVar, Any, Optional
import sys
import threading
from random_user import generate_random_user_properties
# Logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.INFO)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def init_mixpannel_clients(mxp_tokens: List[str]) -> List[Mixpanel]:
    """Build and return one Mixpanel client per configured project token."""
    logger.info('Found %s Mixpannel tokens.', len(mxp_tokens))
    projects: List[Mixpanel] = [Mixpanel(token) for token in mxp_tokens]
    logger.info('%s Mixpannel projects ready to go.', len(projects))
    return projects
MXP_PROJECTS = init_mixpannel_clients(mxp_tokens=MIXPANNEL_TOKENS)
def generate_random_ip() -> str:
    """
    Generate a random, public-looking IPv4 address.

    Adapted from
    https://codereview.stackexchange.com/questions/200337/random-ip-address-generator
    with some changes to generate valid looking IP addresses.

    Returns:
        str: dotted-quad IPv4 address that is neither private nor reserved.
    """
    # trials must live OUTSIDE the loop: the original reset it to 0 on every
    # iteration, so the logged attempt count was always 1.
    trials: int = 0
    while True:
        trials += 1
        # generates an integer with 32 random bits and
        # instances an IPv4Address object from those bits
        bits = random.getrandbits(32)
        try:
            addr = IPv4Address(bits)
        except AddressValueError:
            continue
        # Accept only addresses that are neither private nor reserved.
        # (The original condition `not addr.is_private or not addr.is_reserved`
        # is true for almost every address, so private ranges slipped through.)
        if not (addr.is_private or addr.is_reserved):
            break
    ip_address = str(addr)
    # logging.getLogger() returns the same root logger the module-level
    # `logger` was bound to, keeping the block self-contained.
    logging.getLogger().info(
        'Generated %s IP address after %s attempt', ip_address, trials)
    return ip_address
return ip_address
class BaseShopper(object):
    """Base for simulated shoppers: a random identity plus event tracking."""

    def __init__(self):
        # Random identity: uuid, user agent for a random device/OS, and IP.
        self.uuid = str(uuid.uuid4())  # type: ignore
        device_os = random.choice(DEVICE_OS_CHOICES)
        self.user_agent: str = RANDOM_AGENT.random_agent(*device_os)
        self.ip_address: str = generate_random_ip()
        self.base_properties: dict = {
            'uuid': self.uuid,
            'user_agent': self.user_agent,
            'ip': self.ip_address,
        }
        self.properties = self.base_properties

    def visit(self, end_point: str, extra: Optional[dict] = None):
        """
        Send mixpannel API a visit metric.
        """
        payload: dict = {**self.properties, **extra} if extra else self.properties
        for mxp_project in MXP_PROJECTS:
            mxp_project.track(self.uuid, end_point, properties=payload)
class UnregisteredShopper(BaseShopper):
    """An anonymous visitor; identical behavior to the base shopper."""
    pass
class User(BaseShopper):
    """
    A registered customer.
    """
    def __init__(self, properties: Optional[dict] = None) -> None:
        # No properties -> generate a fresh random identity; otherwise reuse
        # the identity (uuid/user_agent/ip) of an existing shopper.
        if not properties:
            super().__init__()
        else:
            self.uuid = properties['uuid']
            self.user_agent = properties['user_agent']
            self.ip_address = properties['ip']
        self.user_properties: dict = generate_random_user_properties()
        # When properties is falsy, base_properties was just set by
        # super().__init__() above; identity keys override random user props.
        properties = properties or self.base_properties
        self.properties: dict = {
            **self.user_properties, **properties
        }
        self.add_user_to_all_projects(properties=self.properties)
        # Side effect: every new User joins the module-level users_pool.
        users_pool.append(self)
    def _people_set(self, mxp_project: Mixpanel, properties: dict):
        # Push this user's profile to a single Mixpanel project.
        mxp_project.people_set(self.uuid, properties)
    def add_user_to_all_projects(self, properties: dict):
        # Propagate the user profile to every configured Mixpanel project.
        mxp_project: Optional[Mixpanel] = None
        for mxp_project in MXP_PROJECTS:
            self._people_set(
                mxp_project=mxp_project,
                properties=properties
            )
    @classmethod
    def register_requester(cls, requester: UnregisteredShopper):
        # Promote an unregistered shopper to a User, keeping its identity.
        return cls(properties=requester.base_properties)
users_pool: List[User] = []
class Visit(object):
    """
    Simple customer of the website. This might be a registered user or a random unregistered user.
    """
    # NOTE(review): class-level mutable attributes — journey/cart state is
    # shared across all Visit instances; confirm this is intended.
    user_journy: List[str] = []
    user_cart: List[str] = []
    def start(self) -> None:
        # Pick a shopper (registered or not) and begin at the main page.
        self.requester = self.choose_requester()
        self._visit_main_page()
    def choose_requester(self) -> BaseShopper:
        """
        Return a Shopper object
        """
        self.is_registered = random_bool()
        requester: BaseShopper
        if self.is_registered and users_pool:
            requester = random.choice(users_pool) # type: ignore
        else:
            requester = UnregisteredShopper()
        return requester
    def _visit_main_page(self):
        """
        In main page, the user might visit an item page or drop.
        """
        self.requester.visit('main page')
        self._visit_item_page()
    def _visit_item_page(self):
        """
        In an item page, users can:
        1. Add the item into the cart.
        2. Return to main page.
        3. Drop.
        """
        # 70% chance the visitor opens a product page.
        requester_progressed = random_choice([(True, 70), (False, 30)])
        if requester_progressed:
            product = random_choice(SHOP_PRODUCTS)
            self.requester.visit(
                'Visit item page',
                extra={
                    'item name': product
                }
            )
            self._add_item_to_cart(product)
        else:
            requester_progressed = random_bool()
            if requester_progressed:
                # Let us assume that they need to go to home page.
                self._visit_main_page()
    def _visit_checkout(self):
        # No checkout event is possible with an empty cart.
        if not self.user_cart:
            return
        requester_progressed = random_choice([(True, 70), (False, 30)])
        if requester_progressed:
            self._visit_register()
            self.requester.visit('Checkout', extra={'items': self.user_cart})
            self.user_cart = []
    def _visit_register(self):
        # Already-registered shoppers never hit the register step.
        if self.is_registered or type(self.requester) == User:
            return
        user_registered = random_choice([(True, 70), (False, 30)])
        if user_registered:
            self.requester.visit('Register', extra={'items': self.user_cart})
            self.requester = User.register_requester(self.requester)
    def _add_item_to_cart(self, item: str):
        # 70% chance the item lands in the cart; otherwise the visitor may
        # check out what is already in the cart, or drop / go back home.
        # NOTE(review): checkout is only reached when the item is NOT added —
        # confirm this branch ordering is the intended funnel.
        add_item_to_cart = random_choice([(True, 70), (False, 30)])
        if add_item_to_cart:
            self.requester.visit(
                'Add item to cart',
                extra={
                    'item name': item
                }
            )
            self.user_cart.append(item)
        else:
            continue_to_checkout = random_choice([(True, 70), (False, 30)])
            if continue_to_checkout:
                self._visit_checkout()
            else:
                user_drop = random_bool()
                if not user_drop:
                    self._visit_main_page()
def random_bool() -> bool:
    """Return True or False with equal probability."""
    outcomes = [True, False]
    return random.choice(outcomes)
def start_a_visit():
    """Run one complete simulated shopper visit."""
    Visit().start()
def start_script():
    """Fan out 1000 simulated visits, each in its own thread."""
    for _number in range(1000):
        try:
            threading.Thread(target=start_a_visit).start()
        except Exception as err:
            # Catches thread creation/start failures only; exceptions raised
            # inside the thread target are not propagated here.
            logger.exception(err)
if __name__ == '__main__':
start_script()
| 3,042 | 485 | 330 |
b8c4266b2925ad6241ed7027867d24c3df455168 | 1,502 | py | Python | setup.py | ceos-seo/odc-gee | 4033bb7bbc9645c497e0277970c5f01d83ff2a82 | [
"Apache-2.0"
] | 5 | 2021-01-22T18:42:49.000Z | 2022-01-17T23:00:31.000Z | setup.py | ceos-seo/odc-gee | 4033bb7bbc9645c497e0277970c5f01d83ff2a82 | [
"Apache-2.0"
] | 2 | 2022-03-02T14:22:38.000Z | 2022-03-14T16:07:05.000Z | setup.py | ceos-seo/odc-gee | 4033bb7bbc9645c497e0277970c5f01d83ff2a82 | [
"Apache-2.0"
] | 1 | 2021-08-18T17:00:20.000Z | 2021-08-18T17:00:20.000Z | #!/usr/bin/env python
''' ODC-GEE Setup '''
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as fh:
LONG_DESCRIPTION = fh.read()
setup(name='odc-gee',
version='2.25',
author='Andrew Lubawy',
author_email='andrew.m.lubawy@ama-inc.com',
description='Google Earth Engine indexing tools for Open Data Cube',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
license='Apache-2.0',
url='https://github.com/ceos-seo/odc-gee',
project_urls={
'Bug Tracker': 'https://github.com/ceos-seo/odc-gee/issues'
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: GIS',
],
install_requires=[
"click-plugins>=1.1.1",
"click>=7.1.2",
"datacube>=1.8.3",
"earthengine-api>=0.1.24",
"numpy>=1.18.4",
"rasterio>=1.1.8",
],
packages=find_packages(exclude=['tests*']),
python_requires=">=3.6",
scripts=['scripts/index_gee', 'scripts/new_product'],)
| 34.930233 | 74 | 0.579893 | #!/usr/bin/env python
''' ODC-GEE Setup '''
from setuptools import setup, find_packages
with open('README.md', 'r', encoding='utf-8') as fh:
LONG_DESCRIPTION = fh.read()
setup(name='odc-gee',
version='2.25',
author='Andrew Lubawy',
author_email='andrew.m.lubawy@ama-inc.com',
description='Google Earth Engine indexing tools for Open Data Cube',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
license='Apache-2.0',
url='https://github.com/ceos-seo/odc-gee',
project_urls={
'Bug Tracker': 'https://github.com/ceos-seo/odc-gee/issues'
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: GIS',
],
install_requires=[
"click-plugins>=1.1.1",
"click>=7.1.2",
"datacube>=1.8.3",
"earthengine-api>=0.1.24",
"numpy>=1.18.4",
"rasterio>=1.1.8",
],
packages=find_packages(exclude=['tests*']),
python_requires=">=3.6",
scripts=['scripts/index_gee', 'scripts/new_product'],)
| 0 | 0 | 0 |
d114c061a9dfcda5bd45bff06e9a156fac926065 | 3,198 | py | Python | srt_deepl/main.py | sinedie/SRT-DeepL-traslator | 18f70ee70506160363f87b8fcd12c80180fa9eb6 | [
"WTFPL"
] | 19 | 2020-12-10T19:42:15.000Z | 2022-03-22T00:22:59.000Z | srt_deepl/main.py | sinedie/SRT-DeepL-traslator | 18f70ee70506160363f87b8fcd12c80180fa9eb6 | [
"WTFPL"
] | 15 | 2021-01-29T20:31:11.000Z | 2021-11-03T11:18:28.000Z | srt_deepl/main.py | sinedie/SRT-DeepL-traslator | 18f70ee70506160363f87b8fcd12c80180fa9eb6 | [
"WTFPL"
] | 6 | 2021-11-03T11:48:54.000Z | 2022-03-27T20:40:00.000Z | import os
import glob
import logging
import random
import geckodriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from .deepl import Translator
from .srt_parser import wrap_line, save_srt
from .utils import get_proxies
# Check if the current version of geckodriver exists
geckodriver_autoinstaller.install()
INPUT_LANG = {
"auto": "Any language (detect)",
"bg": "Bulgarian",
"zh": "Chinese",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"de": "German",
"el": "Greek",
"hu": "Hungarian",
"it": "Italian",
"ja": "Japanese",
"lv": "Latvian",
"lt": "Lithuanian",
"pl": "Polish",
"pt": "Portuguese",
"ro": "Romanian",
"ru": "Russian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
}
OUTPUT_LANG = {
"bg": "Bulgarian",
"zh": "Chinese (simplified)",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"de": "German",
"el": "Greek",
"hu": "Hungarian",
"it": "Italian",
"ja": "Japanese",
"lv": "Latvian",
"lt": "Lithuanian",
"pl": "Polish",
"pt": "Portuguese",
"br": "Portuguese (Brazilian)",
"ro": "Romanian",
"ru": "Russian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
}
| 22.521127 | 67 | 0.536898 | import os
import glob
import logging
import random
import geckodriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.common.proxy import Proxy, ProxyType
from .deepl import Translator
from .srt_parser import wrap_line, save_srt
from .utils import get_proxies
# Check if the current version of geckodriver exists
geckodriver_autoinstaller.install()
INPUT_LANG = {
"auto": "Any language (detect)",
"bg": "Bulgarian",
"zh": "Chinese",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"de": "German",
"el": "Greek",
"hu": "Hungarian",
"it": "Italian",
"ja": "Japanese",
"lv": "Latvian",
"lt": "Lithuanian",
"pl": "Polish",
"pt": "Portuguese",
"ro": "Romanian",
"ru": "Russian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
}
OUTPUT_LANG = {
"bg": "Bulgarian",
"zh": "Chinese (simplified)",
"cs": "Czech",
"da": "Danish",
"nl": "Dutch",
"en": "English",
"et": "Estonian",
"fi": "Finnish",
"fr": "French",
"de": "German",
"el": "Greek",
"hu": "Hungarian",
"it": "Italian",
"ja": "Japanese",
"lv": "Latvian",
"lt": "Lithuanian",
"pl": "Polish",
"pt": "Portuguese",
"br": "Portuguese (Brazilian)",
"ro": "Romanian",
"ru": "Russian",
"sk": "Slovak",
"sl": "Slovenian",
"es": "Spanish",
"sv": "Swedish",
}
def translate(
    filepath,
    lang_from,
    lang_to,
    wrap_limit=50,
    delete_old=False,
    driver=None,
):
    """
    Translate one or more .srt subtitle files with DeepL via Selenium.

    Args:
        filepath (str or list): an .srt file, a directory (searched
            recursively for .srt files), or a list of either.
        lang_from (str): source language code (key of INPUT_LANG).
        lang_to (str): target language code (key of OUTPUT_LANG).
        wrap_limit (int): max characters per subtitle line before wrapping.
        delete_old (bool): kept for backward compatibility; currently unused.
        driver: existing Selenium WebDriver; when None, a Firefox driver
            behind a randomly chosen public proxy is created.

    Raises:
        TypeError: if filepath is neither a str nor a list.
        RuntimeError: if no usable proxy could be found.
    """
    if driver is None:
        proxies = get_proxies()
        if len(proxies) == 0:
            proxies = get_proxies(https=False)
        if not proxies:
            # Previously random.choice([]) raised an opaque IndexError here.
            raise RuntimeError("No usable proxies found")
        my_proxy = random.choice(proxies)
        proxy = Proxy(
            {
                "proxyType": ProxyType.MANUAL,
                "httpProxy": my_proxy,
                "ftpProxy": my_proxy,
                "sslProxy": my_proxy,
                "noProxy": "",  # set this value as desired
            }
        )
        driver = webdriver.Firefox(proxy=proxy)
        driver.maximize_window()
    translator = Translator(driver)
    lang_from = {
        "lang": lang_from,
        "description": INPUT_LANG[lang_from],
    }
    lang_to = {
        "lang": lang_to,
        "description": OUTPUT_LANG[lang_to],
    }
    # isinstance is the idiomatic type check (handles subclasses too).
    if isinstance(filepath, str):
        filepath = [filepath]
    elif not isinstance(filepath, list):
        raise TypeError("Filepath must be str or list")
    # Collect .srt files: directories are searched recursively.
    files = []
    for fpath in filepath:
        if os.path.isdir(fpath):
            files += glob.glob(fpath + "/**/*.srt", recursive=True)
        elif os.path.splitext(fpath)[-1].lower() == ".srt":
            files.append(fpath)
    for fpath in files:
        subs = translator.translate(
            fpath,
            lang_from,
            lang_to,
            wrap_limit,
        )
        # Re-wrap any translated line that exceeds the wrap limit.
        for sub in subs:
            if len(sub.content) > wrap_limit:
                sub.content = wrap_line(sub.content, wrap_limit)
        fname = os.path.splitext(fpath)[0]
        save_srt(fname, lang_to["lang"], subs)
    translator.close()
| 1,662 | 0 | 23 |
368727f0212caa910aa1d94e6504f1a49b15faf4 | 736 | py | Python | worker.py | sdmichelini/Votes2 | 0f95aacf025831b01ba0026ffc37cd81dc4f1168 | [
"MIT"
] | null | null | null | worker.py | sdmichelini/Votes2 | 0f95aacf025831b01ba0026ffc37cd81dc4f1168 | [
"MIT"
] | 1 | 2016-06-09T23:29:57.000Z | 2016-06-10T00:01:25.000Z | worker.py | sdmichelini/Votes2 | 0f95aacf025831b01ba0026ffc37cd81dc4f1168 | [
"MIT"
] | null | null | null | '''
This is the worker module for the votes task handler.
'''
from google.appengine.ext import ndb
import json
import logging
import model.poll as Poll
import model.vote as Vote
import webapp2
USER_VOTES = {}
#Called when there is a new vote
#Add a New Vote
app = webapp2.WSGIApplication([
('/worker/process_vote', VoteHandler)
],debug = True)
| 19.368421 | 60 | 0.71875 | '''
This is the worker module for the votes task handler.
'''
from google.appengine.ext import ndb
import json
import logging
import model.poll as Poll
import model.vote as Vote
import webapp2
USER_VOTES = {}
#Called when there is a new vote
class VoteHandler(webapp2.RequestHandler):
    """Handles vote submissions (POST) and vote resets (DELETE)."""

    def post(self):
        """Record one vote taken from the request parameters."""
        voter = self.request.get('user')
        choice = self.request.get('vote')
        Vote.vote(voter, choice)

    def delete(self):
        """Clear all recorded votes and reply with a JSON confirmation."""
        self.response.headers['Content-Type'] = 'application/json'
        Vote.reset_vote()
        self.response.write(json.dumps({'message': 'Votes Deleted'}))
app = webapp2.WSGIApplication([
('/worker/process_vote', VoteHandler)
],debug = True)
| 292 | 21 | 69 |
ad69b8c3633575eb06f91529ff2314397666a9d8 | 81 | py | Python | dexis/__init__.py | i2mint/dexis | 2bd2097a7fd242aa6379162ea0499b02f66a3704 | [
"Apache-2.0"
] | null | null | null | dexis/__init__.py | i2mint/dexis | 2bd2097a7fd242aa6379162ea0499b02f66a3704 | [
"Apache-2.0"
] | null | null | null | dexis/__init__.py | i2mint/dexis | 2bd2097a7fd242aa6379162ea0499b02f66a3704 | [
"Apache-2.0"
] | null | null | null | """
A language to describe data, its interpretations and its transformations.
""" | 27 | 73 | 0.765432 | """
A language to describe data, its interpretations and its transformations.
""" | 0 | 0 | 0 |
6866c6b3901676fed7ace71db8cb8a3e91101cad | 7,620 | py | Python | tests/integration/test_settings_profile/test.py | etozhevodim/ClickHouse | 6d80ab1eed98205c23845cba902487e518a368fa | [
"Apache-2.0"
] | null | null | null | tests/integration/test_settings_profile/test.py | etozhevodim/ClickHouse | 6d80ab1eed98205c23845cba902487e518a368fa | [
"Apache-2.0"
] | null | null | null | tests/integration/test_settings_profile/test.py | etozhevodim/ClickHouse | 6d80ab1eed98205c23845cba902487e518a368fa | [
"Apache-2.0"
] | null | null | null | import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', config_dir="configs")
@pytest.fixture(scope="module", autouse=True)
@pytest.fixture(autouse=True)
| 59.069767 | 155 | 0.749738 | import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance', config_dir="configs")
@pytest.fixture(scope="module", autouse=True)
def setup_nodes():
    """Start the cluster once per module and create the test user robin."""
    try:
        cluster.start()
        instance.query("CREATE USER robin")
        yield cluster
    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def reset_after_test():
    """After every test, recreate robin and drop leftover roles/profiles."""
    try:
        yield
    finally:
        instance.query("CREATE USER OR REPLACE robin")
        instance.query("DROP ROLE IF EXISTS worker")
        instance.query("DROP SETTINGS PROFILE IF EXISTS xyz, alpha")
def test_settings_profile():
    """A profile applied to a user enforces its settings and min/max constraints."""
    # Set settings and constraints via CREATE SETTINGS PROFILE ... TO user
    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000 TO robin")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
    instance.query("ALTER SETTINGS PROFILE xyz TO NONE")
    # Detaching the profile restores the default value and lifts constraints.
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
    instance.query("SET max_memory_usage = 80000000", user="robin")
    instance.query("SET max_memory_usage = 120000000", user="robin")
    # Set settings and constraints via CREATE USER ... SETTINGS PROFILE
    instance.query("ALTER USER robin SETTINGS PROFILE xyz")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
    instance.query("ALTER USER robin SETTINGS NONE")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
    instance.query("SET max_memory_usage = 80000000", user="robin")
    instance.query("SET max_memory_usage = 120000000", user="robin")
def test_settings_profile_from_granted_role():
    """Profile settings reach a user through a granted role and are revocable."""
    # Set settings and constraints via granted role
    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000001 MIN 90000000 MAX 110000000")
    instance.query("CREATE ROLE worker SETTINGS PROFILE xyz")
    instance.query("GRANT worker TO robin")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
    instance.query("REVOKE worker FROM robin")
    # Revoking the role removes the profile's effect.
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
    instance.query("SET max_memory_usage = 80000000", user="robin")
    instance.query("SET max_memory_usage = 120000000", user="robin")
    instance.query("ALTER ROLE worker SETTINGS NONE")
    instance.query("GRANT worker TO robin")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
    instance.query("SET max_memory_usage = 80000000", user="robin")
    instance.query("SET max_memory_usage = 120000000", user="robin")
    # Set settings and constraints via CREATE SETTINGS PROFILE ... TO granted role
    instance.query("ALTER SETTINGS PROFILE xyz TO worker")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000001\n"
    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
    instance.query("ALTER SETTINGS PROFILE xyz TO NONE")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
    instance.query("SET max_memory_usage = 80000000", user="robin")
    instance.query("SET max_memory_usage = 120000000", user="robin")
def test_inheritance_of_settings_profile():
    """A profile that inherits another profile passes on settings and READONLY."""
    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000002 READONLY")
    instance.query("CREATE SETTINGS PROFILE alpha SETTINGS PROFILE xyz TO robin")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000002\n"
    assert "Setting max_memory_usage should not be changed" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
def test_alter_and_drop():
    """ALTER updates a profile's constraints in place; DROP removes them all."""
    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS max_memory_usage = 100000003 MIN 90000000 MAX 110000000 TO robin")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "100000003\n"
    assert "Setting max_memory_usage shouldn't be less than 90000000" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
    assert "Setting max_memory_usage shouldn't be greater than 110000000" in instance.query_and_get_error("SET max_memory_usage = 120000000", user="robin")
    instance.query("ALTER SETTINGS PROFILE xyz SETTINGS readonly=1")
    assert "Cannot modify 'max_memory_usage' setting in readonly mode" in instance.query_and_get_error("SET max_memory_usage = 80000000", user="robin")
    instance.query("DROP SETTINGS PROFILE xyz")
    assert instance.query("SELECT value FROM system.settings WHERE name = 'max_memory_usage'", user="robin") == "10000000000\n"
    instance.query("SET max_memory_usage = 80000000", user="robin")
    instance.query("SET max_memory_usage = 120000000", user="robin")
def test_allow_introspection():
    """allow_introspection_functions gates demangle(), via user settings or profile."""
    assert "Not enough privileges" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
    instance.query("GRANT ALL ON *.* TO robin")
    assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
    instance.query("ALTER USER robin SETTINGS allow_introspection_functions=1")
    assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n"
    instance.query("ALTER USER robin SETTINGS NONE")
    assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
    instance.query("CREATE SETTINGS PROFILE xyz SETTINGS allow_introspection_functions=1 TO robin")
    assert instance.query("SELECT demangle('a')", user="robin") == "signed char\n"
    instance.query("DROP SETTINGS PROFILE xyz")
    assert "Introspection functions are disabled" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
    instance.query("REVOKE ALL ON *.* FROM robin")
    assert "Not enough privileges" in instance.query_and_get_error("SELECT demangle('a')", user="robin")
| 7,211 | 0 | 159 |
41b7de3cbb4f7ed644c2eee320c37f883cfeeaf1 | 114 | py | Python | api_test/apps.py | RockyLiys/api_auto_test_platform | 7e6aec23b6f54d20c534b77a2679daf37b65c960 | [
"MIT"
] | null | null | null | api_test/apps.py | RockyLiys/api_auto_test_platform | 7e6aec23b6f54d20c534b77a2679daf37b65c960 | [
"MIT"
] | 10 | 2019-12-04T23:30:37.000Z | 2022-02-10T12:05:45.000Z | api_test/apps.py | RockyLiys/api_auto_test_platform | 7e6aec23b6f54d20c534b77a2679daf37b65c960 | [
"MIT"
] | 3 | 2019-07-01T10:18:40.000Z | 2019-07-06T00:59:19.000Z | from django.apps import AppConfig
| 16.285714 | 33 | 0.719298 | from django.apps import AppConfig
class ApiTestConfig(AppConfig):
    """Django application configuration for the api_test app."""
    # Dotted app label used by Django's app registry.
    name = 'api_test'
    # Human-readable display name (shown e.g. in the Django admin).
    verbose_name = '中文'
| 0 | 60 | 23 |
43f469504caa3fb75b696f8f837183718fed2a1e | 7,978 | py | Python | Sawtooth/families/health/client/health_process.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 2 | 2018-11-07T20:52:53.000Z | 2019-10-20T15:57:01.000Z | Sawtooth/families/health/client/health_process.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 3 | 2021-12-14T20:57:54.000Z | 2022-01-21T23:50:36.000Z | Sawtooth/families/health/client/health_process.py | ishtjot/susereumutep | 56e20c1777e0c938ac42bd8056f84af9e0b76e46 | [
"Apache-2.0"
] | 2 | 2018-11-16T04:20:06.000Z | 2019-03-28T23:49:13.000Z | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Health Calculation Process
This process calculates the health of a code base by using the code smells for
the project. It reads the csv file from code analyzer and the code_smell.toml
"""
import csv
import toml
import os
def _comment_ratio_bounds(switch_cs_data):
    """Return (scs, wt_scs, lcs, wt_lcs) for the comment-to-code ratio smell.

    Ratio thresholds and weights are configured as fractions; both are scaled
    by 100 to match the percent scale used for ratio measures (this mirrors
    the original behavior, which scaled the weight as well).
    """
    low = switch_cs_data.get('comments').get('CommentsToCodeRatioLower')
    high = switch_cs_data.get('comments').get('CommentsToCodeRatioUpper')
    return low[0] * 100, low[1] * 100, high[0] * 100, high[1] * 100


def health_function(_type, _smell, _cm, rows, switch_cs_data):
    """
    For each transaction of code analyzer calculates the corresponding health
    based on the code smell.

    Args:
        _type (str): type of code is either class or method
        _smell (str): description of the code smell to evaluate
        _cm (str) : code measure value; '-' means no measurement
        rows (dict) : per-smell counters, incremented for each scored measure
        switch_cs_data (dict) : code smell configuration of
            [threshold, weight] pairs

    Returns:
        h (float): health of the transaction code analized (0..100); 0 when
            the smell does not apply to this code type or is not configured
    """
    # '-' marks a missing measurement and must be rejected BEFORE any numeric
    # conversion: the original checked it after the ratio conversion, so a
    # ratio smell with '-' crashed with ValueError in float('-').
    if _cm == '-':
        return 0
    if 'Ratio' in _smell:  # ratio measures are fractions; scale to percent
        _cm = float(_cm) * 100
    else:
        _cm = int(_cm)
    rw = 100  # Maximum reward for good code health
    # health, Small Code Smell threshold, Large Code Smell threshold
    h = scs = lcs = 0.00
    # Weight Small Code Smell, Weight Large Code Smell
    wt_scs = wt_lcs = 1
    # Check the type of code (Class or Method) then find the code smell ranges
    if _type == "class":
        if _smell == "Lines of Code":
            scs_list = switch_cs_data.get(_type).get('SmallClass')
            scs = scs_list[0]
            wt_scs = scs_list[1]
            lcs_list = switch_cs_data.get(_type).get('LargeClass')
            lcs = lcs_list[0]
            wt_lcs = lcs_list[1]
        elif _smell == "Comment-to-Code Ratio":
            scs, wt_scs, lcs, wt_lcs = _comment_ratio_bounds(switch_cs_data)
        elif _smell == "Number of Outgoing Invocations":  # God class: upper bound only
            lcs_list = switch_cs_data.get(_type).get('GodClass')
            lcs = lcs_list[0]
            wt_lcs = lcs_list[1]
        elif _smell == "Number of Directly-Used Elements":  # Inappropriate intimacy: upper bound only
            lcs_list = switch_cs_data.get(_type).get('InappropriateIntimacy')
            lcs = lcs_list[0]
            wt_lcs = lcs_list[1]
        else:
            # "Number of Parameters" and unknown smells do not apply to classes
            return 0
    elif _type == "method":
        if _smell == "Lines of Code":
            scs_list = switch_cs_data.get(_type).get('SmallMethod')
            scs = scs_list[0]
            wt_scs = scs_list[1]
            lcs_list = switch_cs_data.get(_type).get('LargeMethod')
            lcs = lcs_list[0]
            wt_lcs = lcs_list[1]
        elif _smell == "Comment-to-Code Ratio":
            scs, wt_scs, lcs, wt_lcs = _comment_ratio_bounds(switch_cs_data)
        elif _smell == "Number of Parameters":
            lcs_list = switch_cs_data.get(_type).get('LargeParameterList')
            lcs = lcs_list[0]
            wt_lcs = lcs_list[1]
        else:
            # God class / inappropriate intimacy and unknown smells do not
            # apply to methods
            return 0
    scs = scs * wt_scs  # Multiply Code Smell by Weight
    lcs = lcs * wt_lcs  # Multiply Code Smell by Weight
    # Fixes zero division if both code smells are zero (smell not configured;
    # also reached when _type is neither class nor method)
    if scs == 0 and lcs == 0:
        return 0
    rows[_smell] = rows[_smell] + 1  # Row counter per type of smell
    if _cm < scs:  # Penalization when code metric is under small Code Smell
        h = rw - ((_cm - scs)**2) / (scs**2) * rw
        return h
    elif _cm <= lcs:  # Within the healthy band: full reward
        h = rw
        return h
    # Fixes zero division if large code smell is zero
    elif _cm > lcs and lcs != 0:  # Penalization when over large Code Smell
        h = rw - ((_cm - lcs)**2) / (lcs**2) * rw
        if h < 0:
            h = 0
        return h
    else:
        # Only reachable when no upper bound is configured (lcs == 0)
        return 100
def calculate_health(suse_config, csv_path):
    """
    Opens the csv file from code analyzer that contains all the transactions of the
    code base. A for loop traverses each transaction to call the health_function,
    sums the results and gets the average heal for the code base.
    Args:
        suse_config (int, float) : code smell data dictionary
        csv_path (str): Type of code is either class or method
    Returns:
        total_health (float): Total health of the code base (0..100);
            -1 when the csv file is missing, -2 when it has no data rows
    """
    if os.path.exists(csv_path):
        with open(csv_path, newline='') as csvfile:
            # Using csv Reader
            reader = csv.reader(csvfile)
            # CSV Header list:
            # 0: Type of Smell, 1: Name, 2: Lines of Code, 3: Comment-to-Code Ratio
            # 4: Number of Directly-Used Elements, 5: Number of Outgoing Invocations
            # 6: Name of Owner Class, 7: Number of Parameters
            # NOTE(review): the logic assumes this exact column order and
            # unique header names — confirm against the analyzer's output.
            head = next(reader)
            # h is a DD with the necessary Header to count returned by health_function
            h = {head[2]: 0, head[3]: 0.00, head[5]: 0, head[4]: 0,head[7]: 0}
            rows = {head[2]: 0, head[3]: 0, head[5]: 0, head[4]: 0, head[7]: 0}
            avg = {head[2]: 0.00, head[3]: 0.00, head[5]: 0, head[4]: 0, head[7]: 0.00}
            lines = 0
            # Accumulate the per-smell health of every transaction row;
            # health_function also updates the per-smell counters in `rows`.
            for x in reader:
                h[head[2]] = h[head[2]] + health_function(x[0].lower(), head[2], x[2], rows, suse_config)
                h[head[3]] = h[head[3]] + health_function(x[0].lower(), head[3], x[3], rows, suse_config)
                h[head[4]] = h[head[4]] + health_function(x[0].lower(), head[4], x[4], rows, suse_config)
                h[head[5]] = h[head[5]] + health_function(x[0].lower(), head[5], x[5], rows, suse_config)
                h[head[7]] = h[head[7]] + health_function(x[0].lower(), head[7], x[7], rows, suse_config)
                lines = lines +1
            if lines == 0:
                total_health = -2
                return (total_health) # Return -2 when file is empty
            #Calculate average of each header
            #Validates each measure has rows > 0
            div = 0
            if rows[head[2]] > 0:
                avg[head[2]] = h[head[2]]/rows[head[2]]
                div = div +1
            if rows[head[3]]>0:
                avg[head[3]] = h[head[3]]/rows[head[3]]
                div = div +1
            if rows[head[5]]>0:
                avg[head[5]] = h[head[5]]/rows[head[5]]
                div = div +1
            if rows[head[4]]>0:
                avg[head[4]] = h[head[4]]/rows[head[4]]
                div = div +1
            if rows[head[7]]>0:
                avg[head[7]] = h[head[7]]/rows[head[7]]
                div = div +1
            #Validates number of code smells calculated > 0
            if div > 0:
                total_health = (avg[head[2]] + avg[head[3]] + avg[head[5]] + avg[head[4]] + avg[head[7]]) / div
            else:
                total_health = 0
            return total_health
    else:
        print("File not found")
        total_health = -1
        return (total_health) # Return -1 when file is not found
| 38.728155 | 112 | 0.593006 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Health Calculation Process
This process calculates the health of a code base by using the code smells for
the project. It reads the csv file from code analyzer and the code_smell.toml
"""
import csv
import toml
import os
def health_function(_type, _smell , _cm, rows, switch_cs_data):
"""
For each transaction of code analyzer calculates the corresponding health
based on the code smell.
Args:
_type (str): type of code is either class or method
_smell (str): description of the code smell to evaluate
_cm (str) : code measure value
rows (int) : number of rows calculated for a specific code smell
switch_cs_data (int, float) : code smell data dictionary
Returns:
h (float): health of the transaction code analized
"""
if 'Ratio' in _smell: #For ratio measures , multiply by 100 and use float type
_cm = float(_cm) * 100
elif _cm == '-':
return 0
else:
_cm = int(_cm)
rw = 100 #Maximum reward for good code health
#health, Small Code Smell, Large Code Smell
h = scs = lcs = 0.00
#Weigth Small Code Smell, Weight Large Code Smell
wt_scs = wt_lcs = 1
#Check the type of code (Class or Method) then find the code smell ranges
if _type == "class":
if _smell == "Lines of Code":
scs_list = switch_cs_data.get(_type).get('SmallClass')
scs = scs_list[0]
wt_scs = scs_list[1]
lcs_list = switch_cs_data.get(_type).get('LargeClass')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Comment-to-Code Ratio":
scs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioLower')
scs = scs_list[0] * 100
wt_scs = scs_list[1] * 100
lcs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioUpper')
lcs = lcs_list[0] * 100
wt_lcs = lcs_list [1] * 100
elif _smell == "Number of Outgoing Invocations": #GOD class for Classes
lcs_list = switch_cs_data.get(_type).get('GodClass')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Number of Directly-Used Elements": #InappropiateIntimacy for Classes
lcs_list = switch_cs_data.get(_type).get('InappropriateIntimacy')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Number of Parameters":
return 0
else:
return 0
elif _type == "method":
if _smell == "Lines of Code":
scs_list = switch_cs_data.get(_type).get('SmallMethod')
scs = scs_list[0]
wt_scs = scs_list[1]
lcs_list = switch_cs_data.get(_type).get('LargeMethod')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
elif _smell == "Comment-to-Code Ratio":
scs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioLower')
scs = scs_list[0] * 100
wt_scs = scs_list[1] * 100
lcs_list = switch_cs_data.get('comments').get('CommentsToCodeRatioUpper')
lcs = lcs_list[0] * 100
wt_lcs = lcs_list [1] * 100
elif _smell == "Number of Outgoing Invocations": #NO GOD class for Methods
return 0
elif _smell == "Number of Directly-Used Elements": #NO InappropiateIntimacy for Methods
return 0
elif _smell == "Number of Parameters":
lcs_list = switch_cs_data.get(_type).get('LargeParameterList')
lcs = lcs_list[0]
wt_lcs = lcs_list [1]
else:
return 0
#Fixes zero division if both code smells are zero
scs = scs * wt_scs # Multiply Code Smell by Weight
lcs = lcs * wt_lcs # Multiply Code Smell by Weight
if scs == 0 and lcs ==0:
return 0
rows[_smell] = rows[_smell] + 1 # Row counter per type of smell
if _cm < scs: #Condition for penalization when code metric is under small Code Smell (cm < scm)
h = rw - ((_cm - scs)**2) / (scs**2) * rw
return h
elif _cm <= lcs:
h = rw
return h
#Fixes zero division if large code smells is zero
elif _cm > lcs and lcs != 0: #Condition for penalization when code metric is over large Code Smell (cm > lcs)
h = rw - ((_cm - lcs)**2) / (lcs**2) * rw
if h < 0:
h = 0
return h
else:
return 100
def calculate_health(suse_config, csv_path):
"""
Opens the csv file from code analyzer that contains all the transactions of the
code base. A for loop traverses each transaction to call the health_function,
sums the results and gets the average heal for the code base.
Args:
suse_config (int, float) : code smell data dictionary
csv_path (str): Type of code is either class or method
Returns:
total_health (float): Total health of the code base
"""
if os.path.exists(csv_path):
with open(csv_path, newline='') as csvfile:
# Using csv Reader
reader = csv.reader(csvfile)
# CSV Header list:
# 0: Type of Smell, 1: Name, 2: Lines of Code, 3: Comment-to-Code Ratio
# 4: Number of Directly-Used Elements, 5: Number of Outgoing Invocations
# 6: Name of Owner Class, 7: Number of Parameters
head = next(reader)
# h is a DD with the necessary Header to count returned by health_function
h = {head[2]: 0, head[3]: 0.00, head[5]: 0, head[4]: 0,head[7]: 0}
rows = {head[2]: 0, head[3]: 0, head[5]: 0, head[4]: 0, head[7]: 0}
avg = {head[2]: 0.00, head[3]: 0.00, head[5]: 0, head[4]: 0, head[7]: 0.00}
lines = 0
for x in reader:
h[head[2]] = h[head[2]] + health_function(x[0].lower(), head[2], x[2], rows, suse_config)
h[head[3]] = h[head[3]] + health_function(x[0].lower(), head[3], x[3], rows, suse_config)
h[head[4]] = h[head[4]] + health_function(x[0].lower(), head[4], x[4], rows, suse_config)
h[head[5]] = h[head[5]] + health_function(x[0].lower(), head[5], x[5], rows, suse_config)
h[head[7]] = h[head[7]] + health_function(x[0].lower(), head[7], x[7], rows, suse_config)
lines = lines +1
if lines == 0:
total_health = -2
return (total_health) # Return -2 when file is empty
#Calculate average of each header
#Validates each measure has rows > 0
div = 0
if rows[head[2]] > 0:
avg[head[2]] = h[head[2]]/rows[head[2]]
div = div +1
if rows[head[3]]>0:
avg[head[3]] = h[head[3]]/rows[head[3]]
div = div +1
if rows[head[5]]>0:
avg[head[5]] = h[head[5]]/rows[head[5]]
div = div +1
if rows[head[4]]>0:
avg[head[4]] = h[head[4]]/rows[head[4]]
div = div +1
if rows[head[7]]>0:
avg[head[7]] = h[head[7]]/rows[head[7]]
div = div +1
#Validates number of code smells calculated > 0
if div > 0:
total_health = (avg[head[2]] + avg[head[3]] + avg[head[5]] + avg[head[4]] + avg[head[7]]) / div
else:
total_health = 0
return total_health
else:
print("File not found")
total_health = -1
return (total_health) # Return -1 when file is not found
| 0 | 0 | 0 |
380911815b84b34201df774de173228db04d11bb | 832 | py | Python | slimano/core/persistence/models/nssi.py | ATNoG/5gcontact | fb65ae919977fb6411932b986bd565b962d9c0b9 | [
"MIT"
] | 1 | 2021-04-13T10:56:20.000Z | 2021-04-13T10:56:20.000Z | slimano/core/persistence/models/nssi.py | ATNoG/5gcontact | fb65ae919977fb6411932b986bd565b962d9c0b9 | [
"MIT"
] | null | null | null | slimano/core/persistence/models/nssi.py | ATNoG/5gcontact | fb65ae919977fb6411932b986bd565b962d9c0b9 | [
"MIT"
] | null | null | null | from persistence.models.models_base import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
| 32 | 71 | 0.695913 | from persistence.models.models_base import Base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey
class Nssi(Base):
__tablename__ = 'nssi'
id = Column(String(40), primary_key=True)
name = Column(String(16))
template_name = Column(String(16))
shared = Column(Boolean)
location = Column(String(50))
inputs = Column(String(10000))
outputs = Column(String(10000))
nfvo_id = Column(String(40), ForeignKey('nfvo.id'))
nfvo = relationship("Nfvo", back_populates='nssi', lazy='subquery')
coe_id = Column(String(40), ForeignKey('coe.id'))
coe = relationship("Coe", back_populates='nssi', lazy='subquery')
# nsi_id = Column(Integer, ForeignKey('nsi.id'))
# nsi = relationship('Nsi', uselist=False, back_populates='nsis')
| 0 | 651 | 23 |
49f3e0202409a39f79ccd4fcf9e5092e749f92bb | 5,068 | py | Python | compile/ui_qr_to_text.py | LEv145/Python-QT5-QR-Coder | 33a6c0fe0dc6dcf24a7104696dff88626167d80b | [
"MIT"
] | 1 | 2020-07-20T09:49:52.000Z | 2020-07-20T09:49:52.000Z | compile/ui_qr_to_text.py | LEv145/Python-QT5-QR-Coder | 33a6c0fe0dc6dcf24a7104696dff88626167d80b | [
"MIT"
] | null | null | null | compile/ui_qr_to_text.py | LEv145/Python-QT5-QR-Coder | 33a6c0fe0dc6dcf24a7104696dff88626167d80b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'qr_to_text.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
# setupUi
# retranslateUi
| 38.984615 | 170 | 0.648382 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'qr_to_text.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_Form(object):
def setupUi(self, Form):
if not Form.objectName():
Form.setObjectName(u"Form")
Form.resize(580, 492)
Form.setStyleSheet(u"background-color: rgb(255, 131, 6)")
self.verticalLayout_2 = QVBoxLayout(Form)
self.verticalLayout_2.setObjectName(u"verticalLayout_2")
self.label = QLabel(Form)
self.label.setObjectName(u"label")
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setMinimumSize(QSize(0, 150))
font = QFont()
font.setFamily(u"MS Serif")
font.setPointSize(20)
self.label.setFont(font)
self.label.setStyleSheet(u"background-color: rgb(255, 84, 16);\n"
"color: rgb(253, 255, 255)")
self.label.setAlignment(Qt.AlignCenter)
self.verticalLayout_2.addWidget(self.label)
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName(u"verticalLayout")
self.pushButton = QPushButton(Form)
self.pushButton.setObjectName(u"pushButton")
sizePolicy1 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(self.pushButton.sizePolicy().hasHeightForWidth())
self.pushButton.setSizePolicy(sizePolicy1)
self.pushButton.setMinimumSize(QSize(0, 70))
font1 = QFont()
font1.setFamily(u"Miriam CLM")
font1.setPointSize(11)
font1.setBold(False)
font1.setWeight(50)
self.pushButton.setFont(font1)
self.pushButton.setCursor(QCursor(Qt.ArrowCursor))
self.pushButton.setStyleSheet(u"QPushButton{\n"
" border: 1px solid grey;\n"
" background-color: rgb(130, 81, 234);\n"
" color: white;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: #9651EA;\n"
" border: 1px solid grey;\n"
" color: white;\n"
"}\n"
"QPushButton:pressed{\n"
" background-color: #AA38EA;\n"
" border: 1px solid grey;\n"
" color: white;\n"
"}")
self.verticalLayout.addWidget(self.pushButton)
self.label_2 = QTextBrowser(Form)
self.label_2.setObjectName(u"label_2")
self.label_2.setStyleSheet(u"background-color: white;")
self.verticalLayout.addWidget(self.label_2)
self.pushButton_2 = QPushButton(Form)
self.pushButton_2.setObjectName(u"pushButton_2")
sizePolicy2 = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.pushButton_2.sizePolicy().hasHeightForWidth())
self.pushButton_2.setSizePolicy(sizePolicy2)
self.pushButton_2.setMinimumSize(QSize(59, 20))
self.pushButton_2.setStyleSheet(u"\n"
"QPushButton{\n"
" border-radius: 5px;\n"
" border: 1px solid grey;\n"
" background-color: rgb(255, 46, 5);\n"
" color: white;\n"
"}\n"
"QPushButton:hover{\n"
" background-color: rgb(255, 16, 5);\n"
" border: 1px solid grey;\n"
" color: white;\n"
"}\n"
"QPushButton:pressed{\n"
" background-color: rgb(255, 0, 0);\n"
" border: 1px solid grey;\n"
" color: white;\n"
"}")
self.verticalLayout.addWidget(self.pushButton_2)
self.verticalLayout_2.addLayout(self.verticalLayout)
self.retranslateUi(Form)
QMetaObject.connectSlotsByName(Form)
# setupUi
def retranslateUi(self, Form):
Form.setWindowTitle(QCoreApplication.translate("Form", u"Form", None))
self.label.setText(QCoreApplication.translate("Form", u"QR \u0432 \u0442\u0435\u043a\u0441\u0442", None))
self.pushButton.setText(QCoreApplication.translate("Form", u"\u0412\u044b\u0431\u0440\u0430\u0442\u044c \u043a\u0430\u0440\u0442\u0438\u043d\u043a\u0443", None))
self.pushButton_2.setText(QCoreApplication.translate("Form", u"\u0412\u0435\u0440\u043d\u0443\u0442\u044c\u0441\u044f", None))
# retranslateUi
| 4,198 | 1 | 81 |
4e4e311d038316a908d7279a12a458d53a643f25 | 5,985 | py | Python | tests/unit/mc3/test_mc3_maf_transform.py | bmeg/bmeg-etl | 3efa28a7775d6defd77457838e92817a2fbc9e99 | [
"MIT"
] | 1 | 2022-03-08T22:06:35.000Z | 2022-03-08T22:06:35.000Z | tests/unit/mc3/test_mc3_maf_transform.py | bmeg/bmeg-etl | 3efa28a7775d6defd77457838e92817a2fbc9e99 | [
"MIT"
] | 191 | 2018-07-09T20:49:34.000Z | 2021-02-09T18:44:28.000Z | tests/unit/mc3/test_mc3_maf_transform.py | bmeg/bmeg-etl | 3efa28a7775d6defd77457838e92817a2fbc9e99 | [
"MIT"
] | null | null | null |
""" test maf_transform """
import pytest
import transform.mc3.mc3_maf_transform as mc3_maf_transform
from bmeg.ioutils import reader
import os
import contextlib
import shutil
import json
@pytest.fixture
def maf_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test.maf')
@pytest.fixture
def gz_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_gz-test.maf.gz')
@pytest.fixture
def no_center_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test-NO_CENTER.maf')
@pytest.fixture
def NO_BARCODE_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test-NO_BARCODE.maf')
@pytest.fixture
def id_lookup_path(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/gdc/id_lookup.tsv')
@pytest.fixture
def project_lookup_path(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/gdc/project_lookup.tsv')
def test_simple(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path):
""" simple test """
validate(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_gz(helpers, gz_file, emitter_directory, id_lookup_path, project_lookup_path):
""" simple test """
validate(helpers, gz_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_no_center(helpers, no_center_file, emitter_directory, id_lookup_path, project_lookup_path):
""" 'Center column' renamed """
validate(helpers, no_center_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_NO_BARCODE(helpers, NO_BARCODE_file, emitter_directory, id_lookup_path, project_lookup_path):
""" no barcode """
validate(helpers, NO_BARCODE_file, emitter_directory, id_lookup_path, project_lookup_path)
| 40.714286 | 140 | 0.702423 |
""" test maf_transform """
import pytest
import transform.mc3.mc3_maf_transform as mc3_maf_transform
from bmeg.ioutils import reader
import os
import contextlib
import shutil
import json
@pytest.fixture
def maf_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test.maf')
@pytest.fixture
def gz_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_gz-test.maf.gz')
@pytest.fixture
def no_center_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test-NO_CENTER.maf')
@pytest.fixture
def NO_BARCODE_file(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/mc3/tcga_test-NO_BARCODE.maf')
@pytest.fixture
def id_lookup_path(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/gdc/id_lookup.tsv')
@pytest.fixture
def project_lookup_path(request):
""" get the full path of the test fixture """
return os.path.join(request.fspath.dirname, 'source/gdc/project_lookup.tsv')
def validate(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path):
allele_file = os.path.join(emitter_directory, 'Allele.Vertex.json.gz')
callset_file = os.path.join(emitter_directory, 'SomaticCallset.Vertex.json.gz')
# deadletter_file = os.path.join(emitter_directory, 'Deadletter.Vertex.json.gz')
aliquot_callset_edge_file = os.path.join(emitter_directory, 'Aliquot_SomaticCallsets_SomaticCallset.Edge.json.gz')
callset_aliquot_edge_file = os.path.join(emitter_directory, 'SomaticCallset_Aliquots_Aliquot.Edge.json.gz')
allele_callset_edge_file = os.path.join(emitter_directory, 'Allele_SomaticCallsets_SomaticCallset.Edge.json.gz')
callset_allele_edge_file = os.path.join(emitter_directory, 'SomaticCallset_Alleles_Allele.Edge.json.gz')
all_files = [allele_file, callset_file,
aliquot_callset_edge_file, callset_aliquot_edge_file,
allele_callset_edge_file, callset_allele_edge_file]
# remove output
with contextlib.suppress(FileNotFoundError):
shutil.rmtree(emitter_directory)
# create output
mc3_maf_transform.transform(mafpath=maf_file,
id_lookup_path=id_lookup_path,
project_lookup_path=project_lookup_path,
emitter_directory=emitter_directory)
# ratify
for f in all_files:
if "Vertex.json.gz" in f:
helpers.assert_vertex_file_valid(f)
elif "Edge.json.gz" in f:
helpers.assert_edge_file_valid(f)
# test alleles edge contents
with reader(allele_callset_edge_file) as f:
for line in f:
# should be json
allelecall = json.loads(line)
if not (allelecall['from'].startswith("SomaticCallset") and allelecall['to'].startswith("Allele")):
continue
assert '|' not in allelecall['data']['methods'], 'call_method should not have a | separator'
allelecall_methods = set(allelecall['data']['methods'])
possible_allelecall_methods = set(["RADIA", "MUTECT", "MUSE", "VARSCANS", "INDELOCATOR", "VARSCANI", "PINDEL", "SOMATICSNIPER"])
assert allelecall_methods < possible_allelecall_methods, 'call_method should belong to vocabulary'
# test Allele contents
with reader(allele_file) as f:
for line in f:
# should be json
allele = json.loads(line)
assert allele['data']['reference_bases'] != allele['data']['alternate_bases'], 'reference should not equal alternate'
# check callset
with reader(callset_file) as f:
for line in f:
# should be json
callset = json.loads(line)
assert callset['gid'].startswith('SomaticCallset:MC3:'), 'should start with SomaticCallset:MC3:xxx'
assert not callset['gid'].startswith('SomaticCallset:MC3:Aliquot:'), 'should NOT start with SomaticCallset:MC3:Aliquot:xxx'
assert callset['data']['tumor_aliquot_id'] != callset['data']['normal_aliquot_id'], 'tumor should not equal normal'
assert 'Aliquot:' not in callset['data']['tumor_aliquot_id'], 'tumor_aliquot_id should not have Aliquot gid'
assert 'Aliquot:' not in callset['data']['normal_aliquot_id'], 'normal_aliquot_id should not have Aliquot gid'
# check callsetfor
with reader(callset_aliquot_edge_file) as f:
for line in f:
# should be json
callsetfor = json.loads(line)
assert callsetfor['from'].startswith('SomaticCallset:MC3:'), 'from should be a callset'
assert callsetfor['to'].startswith('Aliquot:'), 'to should be an aliquot'
# validate vertex for all edges exist
helpers.assert_edge_joins_valid(
all_files,
exclude_labels=['Aliquot']
)
return all_files
def test_simple(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path):
""" simple test """
validate(helpers, maf_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_gz(helpers, gz_file, emitter_directory, id_lookup_path, project_lookup_path):
""" simple test """
validate(helpers, gz_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_no_center(helpers, no_center_file, emitter_directory, id_lookup_path, project_lookup_path):
""" 'Center column' renamed """
validate(helpers, no_center_file, emitter_directory, id_lookup_path, project_lookup_path)
def test_NO_BARCODE(helpers, NO_BARCODE_file, emitter_directory, id_lookup_path, project_lookup_path):
""" no barcode """
validate(helpers, NO_BARCODE_file, emitter_directory, id_lookup_path, project_lookup_path)
| 3,844 | 0 | 23 |
4ea1cc91f42b4147625270f5303b7d41cdb60fba | 2,447 | py | Python | utils/summaries.py | VijayReddy119/small_obstacle_discovery | 0f1324c7591e433a7ffc69832c4421f4cc9a77ad | [
"MIT"
] | 4 | 2021-04-07T03:41:04.000Z | 2022-01-28T14:10:56.000Z | utils/summaries.py | VijayReddy119/small_obstacle_discovery | 0f1324c7591e433a7ffc69832c4421f4cc9a77ad | [
"MIT"
] | 2 | 2022-02-11T02:11:26.000Z | 2022-02-14T03:22:38.000Z | utils/summaries.py | VijayReddy119/small_obstacle_discovery | 0f1324c7591e433a7ffc69832c4421f4cc9a77ad | [
"MIT"
] | 2 | 2021-04-06T08:43:36.000Z | 2021-04-24T13:27:19.000Z | import os
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence, decode_confidence_map_sequence
# def visualize_image(self, writer, dataset, image, target, output,
# global_step, flag='imviz'):
# grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
# writer.add_image(flag+'/Image', grid_image, global_step)
# grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
# dataset=dataset), 3, normalize=False, range=(0, 255))
# writer.add_image(flag+'/Predicted label', grid_image, global_step)
# grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
# dataset=dataset), 3, normalize=False, range=(0, 255))
# writer.add_image(flag+'/Groundtruth label', grid_image, global_step)
| 64.394737 | 116 | 0.630977 | import os
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence, decode_confidence_map_sequence
class TensorboardSummary(object):
def __init__(self, directory):
self.directory = directory
def create_summary(self):
writer = SummaryWriter(logdir=os.path.join(self.directory))
return writer
# def visualize_image(self, writer, dataset, image, target, output,
# global_step, flag='imviz'):
# grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
# writer.add_image(flag+'/Image', grid_image, global_step)
# grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
# dataset=dataset), 3, normalize=False, range=(0, 255))
# writer.add_image(flag+'/Predicted label', grid_image, global_step)
# grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
# dataset=dataset), 3, normalize=False, range=(0, 255))
# writer.add_image(flag+'/Groundtruth label', grid_image, global_step)
def visualize_image(self, writer, dataset, image, target, output, conf,
global_step,num_image=3, flag='imviz'):
grid_image = make_grid(decode_confidence_map_sequence(conf[:num_image].detach().cpu().numpy()), num_image,
normalize=False, range=(0, 255))
writer.add_image(flag+'/Conf', grid_image, global_step)
grid_image = make_grid(image[:num_image].clone().cpu().data, num_image, normalize=True)
writer.add_image(flag+'/Image', grid_image, global_step)
grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:num_image], 1)[1].detach().cpu().numpy(),
dataset=dataset), num_image, normalize=False, range=(0, 255))
writer.add_image(flag+'/Predicted label', grid_image, global_step)
grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:num_image], 1).detach().cpu().numpy(),
dataset=dataset), num_image, normalize=False, range=(0, 255))
writer.add_image(flag+'/Groundtruth label', grid_image, global_step)
| 1,272 | 12 | 102 |
0651f2613ad351afc15d1c77ead2c1cb708039dc | 415 | py | Python | config.py | mwek/kidconnect-ifttt | 80bdc403235c35d1ef966ac2a036e4296feba4b3 | [
"MIT"
] | null | null | null | config.py | mwek/kidconnect-ifttt | 80bdc403235c35d1ef966ac2a036e4296feba4b3 | [
"MIT"
] | null | null | null | config.py | mwek/kidconnect-ifttt | 80bdc403235c35d1ef966ac2a036e4296feba4b3 | [
"MIT"
] | null | null | null | from pathlib import Path
KIDCONNECT_LOGIN="your_email@kidconnect.pl"
KIDCONNECT_PASSWORD="YourPassword12345"
IFTTT_KEY="Get it from https://ifttt.com/services/maker_webhooks/settings"
HISTORY_FILE=Path(__file__).parent.joinpath('history.json') # Where to store the news history (so you don't get double-notified)"
CONVERSATIONS={} # {id: title} map for tracked conversations. Get the ID from the KidConnect URL.
| 51.875 | 130 | 0.79759 | from pathlib import Path
KIDCONNECT_LOGIN="your_email@kidconnect.pl"
KIDCONNECT_PASSWORD="YourPassword12345"
IFTTT_KEY="Get it from https://ifttt.com/services/maker_webhooks/settings"
HISTORY_FILE=Path(__file__).parent.joinpath('history.json') # Where to store the news history (so you don't get double-notified)"
CONVERSATIONS={} # {id: title} map for tracked conversations. Get the ID from the KidConnect URL.
| 0 | 0 | 0 |
81c27deeb315631008eeed9e55bc3494c3af4db5 | 19,946 | py | Python | stages/views/export.py | mohamedba01/RH_SOlution_-StagePFE | 0638b889f4fb75e714a470d18907720fa37b2d14 | [
"Unlicense"
] | null | null | null | stages/views/export.py | mohamedba01/RH_SOlution_-StagePFE | 0638b889f4fb75e714a470d18907720fa37b2d14 | [
"Unlicense"
] | null | null | null | stages/views/export.py | mohamedba01/RH_SOlution_-StagePFE | 0638b889f4fb75e714a470d18907720fa37b2d14 | [
"Unlicense"
] | null | null | null | from collections import OrderedDict
from datetime import date
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.db.models import Q, Sum
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
from ..models import (
Availability, CorpContact, Corporation, Course, Section, Student, Teacher,
Training,
)
from ..utils import school_year_start
openxml_contenttype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
EXPORT_FIELDS = [
# Student fields
('ID externe', 'student__ext_id'),
('Prénom', 'student__first_name'), ('Nom', 'student__last_name'),
('Titre', 'student__gender'),
('Classe', 'student__klass__name'),
('Filière', 'student__klass__section__name'),
('Rue élève', 'student__street'),
('NPA_élève', 'student__pcode'),
('Localité élève', 'student__city'),
('Tél élève', 'student__tel'),
('Email élève', 'student__email'),
('Date de naissance', 'student__birth_date'),
('No AVS', 'student__avs'),
# Stage fields
('Nom de la pratique professionnelle', 'availability__period__title'),
('Début', 'availability__period__start_date'), ('Fin', 'availability__period__end_date'),
('Remarques pratique professionnelle', 'comment'),
('Prénom référent', 'referent__first_name'), ('Nom référent', 'referent__last_name'),
('Courriel référent', 'referent__email'),
('Institution', 'availability__corporation__name'),
('ID externe Inst', 'availability__corporation__ext_id'),
('Rue Inst', 'availability__corporation__street'),
('NPA Inst', 'availability__corporation__pcode'),
('Ville Inst', 'availability__corporation__city'),
('Tél Inst', 'availability__corporation__tel'),
('Domaine', 'availability__domain__name'),
('Remarques Inst', 'availability__comment'),
('Civilité contact', 'availability__contact__civility'),
('Prénom contact', 'availability__contact__first_name'),
('Nom contact', 'availability__contact__last_name'),
('ID externe contact', 'availability__contact__ext_id'),
('Tél contact', 'availability__contact__tel'),
('Courriel contact', 'availability__contact__email'),
('Courriel contact - copie', None),
]
# Export columns for non-attributed availabilities: (header, ORM lookup) pairs
# whose paths (`period__`, `corporation__`, `contact__`) are presumably
# resolved from an Availability queryset — TODO confirm against the consuming
# view (not visible in this chunk). A lookup of None yields a blank column.
NON_ATTR_EXPORT_FIELDS = [
    ('Filière', 'period__section__name'),
    ('Nom de la pratique professionnelle', 'period__title'),
    ('Début', 'period__start_date'), ('Fin', 'period__end_date'),
    ('Institution', 'corporation__name'),
    ('Rue Inst', 'corporation__street'),
    ('NPA Inst', 'corporation__pcode'),
    ('Ville Inst', 'corporation__city'),
    ('Tél Inst', 'corporation__tel'),
    ('Domaine', 'domain__name'),
    ('Remarques Inst', 'comment'),
    ('Civilité contact', 'contact__civility'),
    ('Prénom contact', 'contact__first_name'),
    ('Nom contact', 'contact__last_name'),
    ('Tél contact', 'contact__tel'),
    ('Courriel contact', 'contact__email'),
    # No data source: blank column kept for a manually-filled e-mail copy.
    ('Courriel contact - copie', None),
]
# Columns for the general student export (see general_export below):
# (column header, ORM lookup path) pairs resolved from a Student queryset.
# A lookup of None produces a header with no matching data column
# (the trailing 'EmailCopie_Form' column is left blank on purpose).
GENERAL_EXPORT_FIELDS = [
    ('Num_Ele', 'ext_id'),
    ('Nom_Ele', 'last_name'),
    ('Prenom_Ele', 'first_name'),
    ('Genre_Ele', 'gender'),
    ('Rue_Ele', 'street'),
    ('NPA_Ele', 'pcode'),
    ('Ville_Ele', 'city'),
    ('DateNaissance_Ele', 'birth_date'),
    ('NOAVS_Ele', 'avs'),
    ('Canton_Ele', 'district'),
    ('Email_Ele', 'email'),
    ('Mobile_Ele', 'mobile'),
    ('Compte_RPN', 'login_rpn'),
    ('DispenseCG_Ele', 'dispense_ecg'),
    ('DispenseEPS_Ele', 'dispense_eps'),
    ('SoutienDYS_Ele', 'soutien_dys'),
    ('Classe_Ele', 'klass__name'),
    ('Filiere_Ele', 'klass__section__name'),
    ('MaitreDeClasseNom_Ele', 'klass__teacher__last_name'),
    ('MaitreDeClassePrenom_Ele', 'klass__teacher__first_name'),
    ('OptionASE_Ele', 'option_ase__name'),
    ('Num_Emp', 'corporation__ext_id'),
    ('Nom_Emp', 'corporation__name'),
    ('Rue_Emp', 'corporation__street'),
    ('NPA_Emp', 'corporation__pcode'),
    ('Ville_Emp', 'corporation__city'),
    ('Canton_Emp', 'corporation__district'),
    ('Secteur_Emp', 'corporation__sector'),
    ('Type_EMP', 'corporation__typ'),
    ('Tel_Emp', 'corporation__tel'),
    ('Num_Form', 'instructor__ext_id'),
    ('Titre_Form', 'instructor__civility'),
    ('Prenom_Form', 'instructor__first_name'),
    ('Nom_Form', 'instructor__last_name'),
    ('Tel_Form', 'instructor__tel'),
    ('Email_Form', 'instructor__email'),
    ('Num_Form2', 'instructor2__ext_id'),
    ('Titre_Form2', 'instructor2__civility'),
    ('Prenom_Form2', 'instructor2__first_name'),
    ('Nom_Form2', 'instructor2__last_name'),
    ('Tel_Form2', 'instructor2__tel'),
    ('Email_Form2', 'instructor2__email'),
    # No data source: blank column kept for a manually-filled e-mail copy.
    ('EmailCopie_Form', None),
]
def general_export(request):
    """
    Export all current students data
    """
    headers = OrderedDict(GENERAL_EXPORT_FIELDS)
    # Keep only the columns that map to a database lookup (None entries don't).
    db_keys = [path for path in headers.values() if path is not None]
    workbook = OpenXMLExport('Exportation')
    workbook.write_line(headers.keys(), bold=True)  # header row

    def render(key, raw):
        # Human-readable forms for a few special columns.
        if key == 'gender':
            return 'Monsieur' if raw == 'M' else 'Madame'
        if key in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):
            return 'Oui' if raw is True else ''
        return raw

    students = Student.objects.filter(archived=False).order_by(
        'klass__name', 'last_name', 'first_name'
    )
    for row in students.values(*db_keys):
        workbook.write_line([render(key, row[key]) for key in db_keys])
    return workbook.get_http_response('general_export')
# Columns of the ORTRA export (ASAFE/ASEFE/ASSCFE students, see ortra_export):
# (header label, ORM lookup path on Student).
ORTRA_EXPORT_FIELDS = [
    ('Num_Ele', 'ext_id'),
    ('Nom_Ele', 'last_name'),
    ('Prenom_Ele', 'first_name'),
    ('Genre_Ele', 'gender'),
    ('Rue_Ele', 'street'),
    ('NPA_Ele', 'pcode'),
    ('Ville_Ele', 'city'),
    ('DateNaissance_Ele', 'birth_date'),
    ('Email_Ele', 'email'),
    ('Mobile_Ele', 'mobile'),
    ('Classe_Ele', 'klass__name'),
    ('Filiere_Ele', 'klass__section__name'),
    ('MaitreDeClasseNom_Ele', 'klass__teacher__last_name'),
    ('MaitreDeClassePrenom_Ele', 'klass__teacher__first_name'),
    ('OptionASE_Ele', 'option_ase__name'),
    ('Num_Emp', 'corporation__ext_id'),
    ('Nom_Emp', 'corporation__name'),
    ('Rue_Emp', 'corporation__street'),
    ('NPA_Emp', 'corporation__pcode'),
    ('Ville_Emp', 'corporation__city'),
    ('Tel_Emp', 'corporation__tel'),
    ('Titre_Form', 'instructor__civility'),
    ('Prenom_Form', 'instructor__first_name'),
    ('Nom_Form', 'instructor__last_name'),
    ('Tel_Form', 'instructor__tel'),
    ('Email_Form', 'instructor__email'),
]
def ortra_export(request):
    """
    Export students data from sections ASAFE, ASEFE and ASSCFE
    """
    headers = OrderedDict(ORTRA_EXPORT_FIELDS)
    workbook = OpenXMLExport('Exportation')
    workbook.write_line(headers.keys(), bold=True)  # header row
    db_keys = [path for path in headers.values() if path is not None]
    # Restrict to the three ORTRA sections, matched on the class name.
    section_filter = (
        Q(klass__name__contains='ASAFE')
        | Q(klass__name__contains='ASEFE')
        | Q(klass__name__contains='ASSCFE')
    )
    students = Student.objects.filter(
        section_filter, archived=False
    ).order_by('klass__name', 'last_name', 'first_name')
    for row in students.values(*db_keys):
        line = [
            (('Monsieur' if row[key] == 'M' else 'Madame') if key == 'gender' else row[key])
            for key in db_keys
        ]
        workbook.write_line(line)
    return workbook.get_http_response('ortra_export')
| 40.052209 | 112 | 0.632107 | from collections import OrderedDict
from datetime import date
from tempfile import NamedTemporaryFile
from django.conf import settings
from django.db.models import Q, Sum
from django.http import HttpResponse
from openpyxl import Workbook
from openpyxl.styles import Font
from openpyxl.utils import get_column_letter
from ..models import (
Availability, CorpContact, Corporation, Course, Section, Student, Teacher,
Training,
)
from ..utils import school_year_start
openxml_contenttype = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
class OpenXMLExport:
    """Thin wrapper around an openpyxl Workbook for building one-sheet XLSX exports."""
    def __init__(self, sheet_title):
        # One workbook with a single active sheet named `sheet_title`.
        self.wb = Workbook()
        self.ws = self.wb.active
        self.ws.title = sheet_title
        self.bold = Font(bold=True)
        # Next row to write (1-based, openpyxl convention).
        self.row_idx = 1
    def write_line(self, values, bold=False, col_widths=()):
        """Append `values` as one row; optionally bold it and set column widths."""
        for col_idx, value in enumerate(values, start=1):
            cell = self.ws.cell(row=self.row_idx, column=col_idx)
            try:
                cell.value = value
            except KeyError:
                # Ugly workaround for https://bugs.python.org/issue28969
                from openpyxl.utils.datetime import to_excel
                to_excel.cache_clear()
                cell.value = value
            if bold:
                cell.font = self.bold
            if col_widths:
                # col_widths is parallel to `values` (1-based col_idx -> 0-based index).
                self.ws.column_dimensions[get_column_letter(col_idx)].width = col_widths[col_idx - 1]
        self.row_idx += 1
    def get_http_response(self, filename_base):
        """Serialize the workbook and return it as an XLSX attachment named
        `<filename_base>_<YYYY-MM-DD>.xlsx`."""
        with NamedTemporaryFile() as tmp:
            self.wb.save(tmp.name)
            tmp.seek(0)
            response = HttpResponse(tmp, content_type=openxml_contenttype)
            response['Content-Disposition'] = 'attachment; filename=%s_%s.xlsx' % (
                filename_base, date.strftime(date.today(), '%Y-%m-%d')
            )
            return response
# Columns of the trainings export (stages_export): (header label, ORM lookup
# path relative to Training). The None entry is a column with no direct DB
# lookup; it is filled separately (CC addresses) by stages_export.
EXPORT_FIELDS = [
    # Student fields
    ('ID externe', 'student__ext_id'),
    ('Prénom', 'student__first_name'), ('Nom', 'student__last_name'),
    ('Titre', 'student__gender'),
    ('Classe', 'student__klass__name'),
    ('Filière', 'student__klass__section__name'),
    ('Rue élève', 'student__street'),
    ('NPA_élève', 'student__pcode'),
    ('Localité élève', 'student__city'),
    ('Tél élève', 'student__tel'),
    ('Email élève', 'student__email'),
    ('Date de naissance', 'student__birth_date'),
    ('No AVS', 'student__avs'),
    # Stage fields
    ('Nom de la pratique professionnelle', 'availability__period__title'),
    ('Début', 'availability__period__start_date'), ('Fin', 'availability__period__end_date'),
    ('Remarques pratique professionnelle', 'comment'),
    ('Prénom référent', 'referent__first_name'), ('Nom référent', 'referent__last_name'),
    ('Courriel référent', 'referent__email'),
    ('Institution', 'availability__corporation__name'),
    ('ID externe Inst', 'availability__corporation__ext_id'),
    ('Rue Inst', 'availability__corporation__street'),
    ('NPA Inst', 'availability__corporation__pcode'),
    ('Ville Inst', 'availability__corporation__city'),
    ('Tél Inst', 'availability__corporation__tel'),
    ('Domaine', 'availability__domain__name'),
    ('Remarques Inst', 'availability__comment'),
    ('Civilité contact', 'availability__contact__civility'),
    ('Prénom contact', 'availability__contact__first_name'),
    ('Nom contact', 'availability__contact__last_name'),
    ('ID externe contact', 'availability__contact__ext_id'),
    ('Tél contact', 'availability__contact__tel'),
    ('Courriel contact', 'availability__contact__email'),
    ('Courriel contact - copie', None),
]
# Columns of the non-attributed availabilities export (stages_export with
# non_attr=1): lookup paths are relative to Availability.
NON_ATTR_EXPORT_FIELDS = [
    ('Filière', 'period__section__name'),
    ('Nom de la pratique professionnelle', 'period__title'),
    ('Début', 'period__start_date'), ('Fin', 'period__end_date'),
    ('Institution', 'corporation__name'),
    ('Rue Inst', 'corporation__street'),
    ('NPA Inst', 'corporation__pcode'),
    ('Ville Inst', 'corporation__city'),
    ('Tél Inst', 'corporation__tel'),
    ('Domaine', 'domain__name'),
    ('Remarques Inst', 'comment'),
    ('Civilité contact', 'contact__civility'),
    ('Prénom contact', 'contact__first_name'),
    ('Nom contact', 'contact__last_name'),
    ('Tél contact', 'contact__tel'),
    ('Courriel contact', 'contact__email'),
    ('Courriel contact - copie', None),
]
def stages_export(request, scope=None):
    """Export trainings ("pratiques professionnelles") as an XLSX download.

    Query-string parameters:
        period: restrict the export to one period (id).
        non_attr: together with `period`, export the period's availabilities
            that have no attached training, instead of the trainings.
    With scope == 'all', every training in the database is exported; otherwise
    only trainings whose period ends after school_year_start().
    """
    period_filter = request.GET.get('period')
    non_attributed = bool(int(request.GET.get('non_attr', 0)))
    export_fields = OrderedDict(EXPORT_FIELDS)
    # Field used to detect a missing contact, and the corporation-name field;
    # both differ between the Training-based and Availability-based exports.
    contact_test_field = 'availability__contact__last_name'
    corp_name_field = 'availability__corporation__name'
    if period_filter:
        if non_attributed:
            # Export non attributed availabilities for a specific period
            query = Availability.objects.filter(period_id=period_filter, training__isnull=True)
            export_fields = OrderedDict(NON_ATTR_EXPORT_FIELDS)
            contact_test_field = 'contact__last_name'
            corp_name_field = 'corporation__name'
        else:
            # Export trainings for a specific period
            query = Training.objects.filter(availability__period_id=period_filter)
    else:
        if scope and scope == 'all':
            # Export all trainings in the database
            query = Training.objects.all()
        else:
            # Default: only trainings of periods ending after the school-year start.
            query = Training.objects.filter(availability__period__end_date__gt=school_year_start())
    # Prepare "default" contacts (when not defined on training)
    section_names = Section.objects.all().values_list('name', flat=True)
    # corporation name -> {section name -> default contact (or '')}
    default_contacts = dict(
        (c, {s: '' for s in section_names})
        for c in Corporation.objects.all().values_list('name', flat=True)
    )
    # corporation name -> {section name -> [contacts to always CC]}
    always_ccs = dict(
        (c, {s: [] for s in section_names})
        for c in Corporation.objects.all().values_list('name', flat=True)
    )
    for contact in CorpContact.objects.filter(corporation__isnull=False
                                              ).select_related('corporation'
                                              ).prefetch_related('sections').order_by('corporation'):
        for section in contact.sections.all():
            # A main contact always wins; otherwise the first contact seen is kept.
            if not default_contacts[contact.corporation.name][section.name] or contact.is_main is True:
                default_contacts[contact.corporation.name][section.name] = contact
            if contact.always_cc:
                always_ccs[contact.corporation.name][section.name].append(contact)
        if contact.is_main:
            # A main contact also fills any section still lacking a default.
            for sname in section_names:
                if not default_contacts[contact.corporation.name][sname]:
                    default_contacts[contact.corporation.name][sname] = contact
    export = OpenXMLExport('Pratiques professionnelles')
    export.write_line(export_fields.keys(), bold=True)  # Headers
    # Data
    query_keys = [f for f in export_fields.values() if f is not None]
    for line in query.values(*query_keys):
        values = []
        for field in query_keys:
            value = line[field]
            if 'gender' in field:
                value = {'F': 'Madame', 'M': 'Monsieur', '': ''}[value]
            values.append(value)
        if line[contact_test_field] is None:
            # Use default contact
            # The last 6 query columns are the contact columns: replace them wholesale.
            contact = default_contacts.get(line[corp_name_field], {}).get(line[export_fields['Filière']])
            if contact:
                values = values[:-6] + [
                    contact.civility, contact.first_name, contact.last_name, contact.ext_id,
                    contact.tel, contact.email
                ]
        if always_ccs[line[corp_name_field]].get(line[export_fields['Filière']]):
            # Extra column: semicolon-separated addresses of the always-CC contacts.
            values.append("; ".join(
                [c.email for c in always_ccs[line[corp_name_field]].get(line[export_fields['Filière']])]
            ))
        export.write_line(values)
    return export.get_http_response('pp_export')
def _ratio_Ede_Ase_Assc():
    """Return the period ratios used to split imputations between paired domains.

    For each (key, main, other) triple below the ratio is
    main / (main + other) over the total of Course periods, defaulting to 1
    when no period is recorded for either imputation code.
    """
    def _total(imputation):
        # Total Course periods for one imputation code (0 when none exist).
        return Course.objects.filter(imputation=imputation).aggregate(Sum('period'))['period__sum'] or 0

    ratios = {}
    for key, main_code, other_code in (
        ('edepe', 'EDEpe', 'EDEps'),
        ('asefe', 'ASEFE', 'MPTS'),
        ('asscfe', 'ASSCFE', 'MPS'),
    ):
        main = _total(main_code)
        other = _total(other_code)
        ratios[key] = 1 if main + other == 0 else main / (main + other)
    return ratios
def imputations_export(request):
    """Export one row per active (non-archived) teacher with computed imputations."""
    IMPUTATIONS_EXPORT_FIELDS = [
        'Nom', 'Prénom', 'Report passé', 'Ens', 'Discipline',
        'Accomp.', 'Discipline', 'Total payé', 'Indice', 'Taux', 'Report futur',
        'ASA', 'ASSC', 'ASE', 'MPTS', 'MPS', 'EDEpe', 'EDEps', 'EDS', 'CAS_FPP'
    ]
    ratios = _ratio_Ede_Ase_Assc()
    export = OpenXMLExport('Imputations')
    export.write_line(IMPUTATIONS_EXPORT_FIELDS, bold=True)  # Headers
    for teacher in Teacher.objects.filter(archived=False):
        activities, imputations = teacher.calc_imputations(ratios)
        values = [
            teacher.last_name, teacher.first_name, teacher.previous_report,
            activities['tot_ens'], 'Ens. prof.', activities['tot_mandats'] + activities['tot_formation'],
            'Accompagnement', activities['tot_paye'], 'Charge globale',
            # Employment rate: total paid as a fraction of the global charge.
            '{0:.2f}'.format(activities['tot_paye']/settings.GLOBAL_CHARGE_PERCENT),
            teacher.next_report,
        ]
        # One column per imputation key (assumes calc_imputations yields keys in
        # the header order ASA..CAS_FPP — TODO confirm against Teacher model).
        values.extend(imputations.values())
        export.write_line(values)
    return export.get_http_response('Imputations_export')
def export_sap(request):
    """Export teacher imputations in the row format expected by SAP.

    Writes one line per teacher and non-zero imputation, plus one line for the
    previous report and one for the next report of each teacher.
    """
    EXPORT_SAP_HEADERS = [
        'PERNR', 'PERNOM', 'DEGDA', 'ENDDA', 'ZNOM', 'ZUND',
        'ZACT', 'ZBRA', 'ZOTP', 'ZCCO', 'ZORD', 'ZTAUX',
    ]
    # Imputation key -> SAP OTP accounting string.
    MAPPING_OTP = {
        'ASAFE': 'CIFO01.03.02.03.01.02 - ASA EE',
        'ASEFE': 'CIFO01.03.02.04.01.02 - CFC ASE EE',
        'ASSCFE': 'CIFO01.03.02.04.02.02 - CFC ASSC EE',
        'EDEpe': 'CIFO01.03.02.07.01.01 - EDE prat. prof. PT',
        'EDEps': 'CIFO01.03.02.07.02.01 - EDE stages PT',
        'EDS': 'CIFO01.03.02.07.03.02 - EDS EE',
        'CAS_FPP': 'CIFO01.03.02.01.03 - Mandats divers (CAS FPP)',
        'MPTS' : 'CIFO01.04.03.06.02.01 - MPTS ASE',
        'MPS': 'CIFO01.04.03.06.03.01 - MPS Santé',
    }
    ratios = _ratio_Ede_Ase_Assc()
    export = OpenXMLExport('Imputations')
    export.write_line(EXPORT_SAP_HEADERS, bold=True)  # Headers
    # NOTE(review): school-year bounds are hard-coded to 2018-2019; they should
    # probably be derived from the current date — confirm before reuse.
    start_date = '20.08.2018'
    end_date = '19.08.2019'
    indice = 'charge globale'
    type_act = 'Ens. prof.'
    branche = 'Ens. prof.'
    centre_cout = ''
    stat = ''
    for teacher in Teacher.objects.filter(archived=False):
        activities, imputations = teacher.calc_imputations(ratios)
        for key in imputations:
            if imputations[key] > 0:
                values = [
                    teacher.ext_id, teacher.full_name, start_date, end_date, imputations[key], indice, type_act,
                    branche, MAPPING_OTP[key], centre_cout, stat,
                    round(imputations[key] / settings.GLOBAL_CHARGE_PERCENT, 2),
                ]
                export.write_line(values)
        # Previous report
        values = [
            teacher.ext_id, teacher.full_name, start_date, end_date, teacher.previous_report, indice, type_act,
            branche, 'Report précédent', centre_cout, stat,
            round(teacher.previous_report / settings.GLOBAL_CHARGE_PERCENT, 2),
        ]
        export.write_line(values)
        # Next report
        values = [
            teacher.ext_id, teacher.full_name, start_date, end_date, teacher.next_report, indice, type_act,
            branche, 'Report suivant', centre_cout, stat,
            round(teacher.next_report / settings.GLOBAL_CHARGE_PERCENT, 2),
        ]
        export.write_line(values)
    return export.get_http_response('Export_SAP')
# Columns of the all-students export (see general_export below):
# (header label, ORM lookup path on Student). A None path has no database
# lookup and is skipped when building the query.
GENERAL_EXPORT_FIELDS = [
    ('Num_Ele', 'ext_id'),
    ('Nom_Ele', 'last_name'),
    ('Prenom_Ele', 'first_name'),
    ('Genre_Ele', 'gender'),
    ('Rue_Ele', 'street'),
    ('NPA_Ele', 'pcode'),
    ('Ville_Ele', 'city'),
    ('DateNaissance_Ele', 'birth_date'),
    ('NOAVS_Ele', 'avs'),
    ('Canton_Ele', 'district'),
    ('Email_Ele', 'email'),
    ('Mobile_Ele', 'mobile'),
    ('Compte_RPN', 'login_rpn'),
    ('DispenseCG_Ele', 'dispense_ecg'),
    ('DispenseEPS_Ele', 'dispense_eps'),
    ('SoutienDYS_Ele', 'soutien_dys'),
    ('Classe_Ele', 'klass__name'),
    ('Filiere_Ele', 'klass__section__name'),
    ('MaitreDeClasseNom_Ele', 'klass__teacher__last_name'),
    ('MaitreDeClassePrenom_Ele', 'klass__teacher__first_name'),
    ('OptionASE_Ele', 'option_ase__name'),
    ('Num_Emp', 'corporation__ext_id'),
    ('Nom_Emp', 'corporation__name'),
    ('Rue_Emp', 'corporation__street'),
    ('NPA_Emp', 'corporation__pcode'),
    ('Ville_Emp', 'corporation__city'),
    ('Canton_Emp', 'corporation__district'),
    ('Secteur_Emp', 'corporation__sector'),
    ('Type_EMP', 'corporation__typ'),
    ('Tel_Emp', 'corporation__tel'),
    ('Num_Form', 'instructor__ext_id'),
    ('Titre_Form', 'instructor__civility'),
    ('Prenom_Form', 'instructor__first_name'),
    ('Nom_Form', 'instructor__last_name'),
    ('Tel_Form', 'instructor__tel'),
    ('Email_Form', 'instructor__email'),
    ('Num_Form2', 'instructor2__ext_id'),
    ('Titre_Form2', 'instructor2__civility'),
    ('Prenom_Form2', 'instructor2__first_name'),
    ('Nom_Form2', 'instructor2__last_name'),
    ('Tel_Form2', 'instructor2__tel'),
    ('Email_Form2', 'instructor2__email'),
    ('EmailCopie_Form', None),
]
def general_export(request):
    """
    Export all current students data
    """
    headers = OrderedDict(GENERAL_EXPORT_FIELDS)
    # Keep only the columns that map to a database lookup (None entries don't).
    db_keys = [path for path in headers.values() if path is not None]
    workbook = OpenXMLExport('Exportation')
    workbook.write_line(headers.keys(), bold=True)  # header row

    def render(key, raw):
        # Human-readable forms for a few special columns.
        if key == 'gender':
            return 'Monsieur' if raw == 'M' else 'Madame'
        if key in ('dispense_ecg', 'dispense_eps', 'soutien_dys'):
            return 'Oui' if raw is True else ''
        return raw

    students = Student.objects.filter(archived=False).order_by(
        'klass__name', 'last_name', 'first_name'
    )
    for row in students.values(*db_keys):
        workbook.write_line([render(key, row[key]) for key in db_keys])
    return workbook.get_http_response('general_export')
# Columns of the ORTRA export (ASAFE/ASEFE/ASSCFE students, see ortra_export):
# (header label, ORM lookup path on Student).
ORTRA_EXPORT_FIELDS = [
    ('Num_Ele', 'ext_id'),
    ('Nom_Ele', 'last_name'),
    ('Prenom_Ele', 'first_name'),
    ('Genre_Ele', 'gender'),
    ('Rue_Ele', 'street'),
    ('NPA_Ele', 'pcode'),
    ('Ville_Ele', 'city'),
    ('DateNaissance_Ele', 'birth_date'),
    ('Email_Ele', 'email'),
    ('Mobile_Ele', 'mobile'),
    ('Classe_Ele', 'klass__name'),
    ('Filiere_Ele', 'klass__section__name'),
    ('MaitreDeClasseNom_Ele', 'klass__teacher__last_name'),
    ('MaitreDeClassePrenom_Ele', 'klass__teacher__first_name'),
    ('OptionASE_Ele', 'option_ase__name'),
    ('Num_Emp', 'corporation__ext_id'),
    ('Nom_Emp', 'corporation__name'),
    ('Rue_Emp', 'corporation__street'),
    ('NPA_Emp', 'corporation__pcode'),
    ('Ville_Emp', 'corporation__city'),
    ('Tel_Emp', 'corporation__tel'),
    ('Titre_Form', 'instructor__civility'),
    ('Prenom_Form', 'instructor__first_name'),
    ('Nom_Form', 'instructor__last_name'),
    ('Tel_Form', 'instructor__tel'),
    ('Email_Form', 'instructor__email'),
]
def ortra_export(request):
    """
    Export students data from sections ASAFE, ASEFE and ASSCFE
    """
    headers = OrderedDict(ORTRA_EXPORT_FIELDS)
    workbook = OpenXMLExport('Exportation')
    workbook.write_line(headers.keys(), bold=True)  # header row
    db_keys = [path for path in headers.values() if path is not None]
    # Restrict to the three ORTRA sections, matched on the class name.
    section_filter = (
        Q(klass__name__contains='ASAFE')
        | Q(klass__name__contains='ASEFE')
        | Q(klass__name__contains='ASSCFE')
    )
    students = Student.objects.filter(
        section_filter, archived=False
    ).order_by('klass__name', 'last_name', 'first_name')
    for row in students.values(*db_keys):
        line = [
            (('Monsieur' if row[key] == 'M' else 'Madame') if key == 'gender' else row[key])
            for key in db_keys
        ]
        workbook.write_line(line)
    return workbook.get_http_response('ortra_export')
def export_qualification(request, section='ede'):
    """Export qualification data (exams) for third-year students of `section`.

    One row per exam; the student columns are written only on the student's
    first row, then left blank. Students without exams still get one row.
    """
    headers = [
        'Classe', 'Etudiant-e',
        'Référent pratique', 'Titre TD', 'Résumé TD', 'Ens. référent',
        'Mentor',
        'Session', 'Type', 'Exp_int.',
        'Expert ext. Civilité', 'Expert ext. Nom', 'Expert ext. Adresse', 'Expert ext. Localité',
        'Date', 'Salle', 'Note',
    ]
    export_name = 'Export_qualif_%s' % section.upper()
    export = OpenXMLExport(export_name)
    export.write_line(headers, bold=True)
    # Data
    # Placeholder for the 7 student columns on follow-up exam rows.
    empty_values = [''] * 7
    # Third-year classes are named '3<SECTION>...'.
    for student in Student.objects.filter(klass__name__startswith='3%s' % section.upper(), archived=False
                                          ).select_related('klass', 'referent', 'training_referent', 'mentor',
                                          ).prefetch_related('examination_set'
                                          ).order_by('klass__name', 'last_name'):
        stud_values = [
            student.klass.name,
            student.full_name,
            student.training_referent.full_name if student.training_referent else '',
            student.title,
            student.subject,
            student.referent.full_name if student.referent else '',
            student.mentor.full_name if student.mentor else '',
        ]
        lines_exported = 0
        for exam in student.examination_set.all():
            exam_values = [
                str(exam.session),
                exam.get_type_exam_display(),
                exam.internal_expert.full_name if exam.internal_expert else '',
                exam.external_expert.civility if exam.external_expert else '',
                exam.external_expert.full_name if exam.external_expert else '',
                exam.external_expert.street if exam.external_expert else '',
                exam.external_expert.pcode_city if exam.external_expert else '',
                exam.date_exam,
                exam.room,
                exam.mark,
            ]
            if lines_exported == 0:
                export.write_line(stud_values + exam_values)
            else:
                export.write_line(empty_values + exam_values)
            lines_exported += 1
        if lines_exported == 0:
            # Student without any exam: still export the student columns.
            export.write_line(stud_values)
    return export.get_http_response(export_name)
def institutions_export(request):
    """Export every non-archived institution, one column per Corporation model
    field (except ``archived``), as an XLSX download."""
    columns = []
    for field in Corporation._meta.get_fields():
        # Keep only fields carrying a verbose name; skip the archive flag.
        if hasattr(field, 'verbose_name') and field.name not in ('archived',):
            columns.append((field.verbose_name, field.name))
    workbook = OpenXMLExport('Institutions')
    workbook.write_line([label for label, _name in columns], bold=True)
    for corporation in Corporation.objects.filter(archived=False).order_by('name'):
        row = []
        for _label, name in columns:
            raw = getattr(corporation, name)
            # None renders as an empty cell; everything else as text.
            row.append('' if raw is None else str(raw))
        workbook.write_line(row)
    return workbook.get_http_response('Institutions')
| 11,893 | -1 | 241 |
2ae1173b35126362da198a461c119458fb5cad25 | 14,639 | py | Python | approx-gradient/blocks.py | USC-Melady/ICLR2020-PADGN | 507e04228291cb16c2163dee125c1688894f26f0 | [
"MIT"
] | 47 | 2019-12-25T07:35:23.000Z | 2022-03-24T01:42:56.000Z | approx-gradient/blocks.py | USC-Melady/ICLR2020-PADGN | 507e04228291cb16c2163dee125c1688894f26f0 | [
"MIT"
] | null | null | null | approx-gradient/blocks.py | USC-Melady/ICLR2020-PADGN | 507e04228291cb16c2163dee125c1688894f26f0 | [
"MIT"
] | 12 | 2020-02-19T02:03:42.000Z | 2021-08-22T23:55:51.000Z | import torch
import torch.nn as nn
from torch_scatter import scatter_add, scatter_max, scatter_mean, scatter_min, scatter_mul
from utils import decompose_graph
LATENT_SIZE = 32
class GlobalBlock(nn.Module):
    """Global block, f_g.
    A block that updates the global features of each graph based on
    the previous global features, the aggregated features of the
    edges of the graph, and the aggregated features of the nodes of the graph.
    (Method bodies are not present in this copy of the class.)
    """
class EdgeBlock(nn.Module):
    """Edge block, f_e.
    Update the features of each edge based on the previous edge features,
    the features of the adjacent nodes, and the global features.
    (Method bodies are not present in this copy of the class.)
    """
class NodeBlock(nn.Module):
    """Node block, f_v.
    Update the features of each node based on the previous node features,
    the aggregated features of the received edges,
    the aggregated features of the sent edges, and the global features.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 use_nodes=True,
                 use_sent_edges=False,
                 use_received_edges=True,
                 use_globals=True,
                 sent_edges_reducer=scatter_add,
                 received_edges_reducer=scatter_add,
                 custom_func=None):
        """Initialization of the NodeBlock module.
        Args:
            in_features: Input dimension.
                If node, 2*edge(sent, received), and global are used, d_v+(2*d_e)+d_g.
                h'_i = f_v(h_i, AGG(h_ij), AGG(h_ji), u)
            out_features: Output dimension.
                h'_i will have the dimension.
            use_nodes: Whether to condition on node attributes.
            use_sent_edges: Whether to condition on sent edges attributes.
            use_received_edges: Whether to condition on received edges attributes.
            use_globals: Whether to condition on the global attributes.
            sent_edges_reducer, received_edges_reducer: Aggregators,
                one of scatter_* [add, mul, max, min, mean]
            custom_func: optional module used as f_v instead of the default MLP.
        """
        super(NodeBlock, self).__init__()
        if not (use_nodes or use_sent_edges or use_received_edges or use_globals):
            raise ValueError("At least one of use_received_edges, use_sent_edges, "
                             "use_nodes or use_globals must be True.")
        self._use_nodes = use_nodes
        self._use_sent_edges = use_sent_edges
        self._use_received_edges = use_received_edges
        self._use_globals = use_globals
        self._sent_edges_reducer = sent_edges_reducer
        self._received_edges_reducer = received_edges_reducer
        # f_v() is a function: R^in_features -> R^out_features
        if custom_func:
            # Customized function can be used for self.net instead of the default function.
            # It is highly recommended to use an nn.Sequential() type.
            self.net = custom_func
        else:
            self.net = nn.Sequential(nn.Linear(in_features, LATENT_SIZE),
                                     nn.ReLU(),
                                     nn.Linear(LATENT_SIZE, out_features),
                                     )
class NodeBlockInd(NodeBlock):
    """Node-level feature transformation.
    Each node is considered independently. (No edge is considered.)
    Args:
        in_features: input dimension of node representations.
        out_features: output dimension of node representations.
                      (node embedding size)
    (N^v, d_v) -> (N^v, out_features)
    NodeBlockInd(graph) -> updated graph
    (Implementation not included in this copy of the class.)
    """
class EdgeBlockInd(EdgeBlock):
    """Edge-level feature transformation.
    Each edge is considered independently. (No node is considered.)
    Args:
        in_features: input dimension of edge representations.
        out_features: output dimension of edge representations.
                      (edge embedding size)
    (N^e, d_e) -> (N^e, out_features)
    EdgeBlockInd(graph) -> updated graph
    (Implementation not included in this copy of the class.)
    """
class GlobalBlockInd(GlobalBlock):
    """Global-level feature transformation.
    No edge/node is considered.
    Args:
        in_features: input dimension of global representations.
        out_features: output dimension of global representations.
                      (global embedding size)
    (1, d_g) -> (1, out_features)
    GlobalBlockInd(graph) -> updated graph
    (Implementation not included in this copy of the class.)
    """
| 39.246649 | 126 | 0.548603 | import torch
import torch.nn as nn
from torch_scatter import scatter_add, scatter_max, scatter_mean, scatter_min, scatter_mul
from utils import decompose_graph
LATENT_SIZE = 32
class GlobalBlock(nn.Module):
    """Global block, f_g.
    A block that updates the global features of each graph based on
    the previous global features, the aggregated features of the
    edges of the graph, and the aggregated features of the nodes of the graph.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 use_edges=True,
                 use_nodes=True,
                 use_globals=True,
                 edge_reducer=scatter_mean,
                 node_reducer=scatter_mean,
                 custom_func=None,
                 device='cpu'):
        """Args:
            in_features: input dimension of f_g (sum of the used feature sizes).
            out_features: output dimension of the updated global attribute.
            use_edges/use_nodes/use_globals: which inputs feed f_g.
            edge_reducer/node_reducer: scatter_* aggregators over edges/nodes.
            custom_func: optional module used as f_g instead of the default MLP.
            device: device of the intermediate index tensors built in forward().
        """
        super(GlobalBlock, self).__init__()
        if not (use_nodes or use_edges or use_globals):
            raise ValueError("At least one of use_edges, "
                             "use_nodes or use_globals must be True.")
        self._use_edges = use_edges  # no need to differentiate sent/received edges.
        self._use_nodes = use_nodes
        self._use_globals = use_globals
        self._edge_reducer = edge_reducer
        self._node_reducer = node_reducer
        self.device = device
        # f_g is a function R^in_features -> R^out_features
        if custom_func:
            # A caller-supplied module (ideally nn.Sequential) replaces the default MLP.
            self.net = custom_func
        else:
            self.net = nn.Sequential(nn.Linear(in_features, LATENT_SIZE),
                                     nn.ReLU(),
                                     nn.Linear(LATENT_SIZE, out_features),
                                     )
    def forward(self, graph):
        """Aggregate the selected features and update graph.global_attr in place."""
        node_attr, edge_index, edge_attr, global_attr = decompose_graph(graph)
        senders_idx, receivers_idx = edge_index
        num_edges = graph.num_edges
        num_nodes = graph.num_nodes
        globals_to_collect = []
        if self._use_globals:
            globals_to_collect.append(global_attr)  # global_attr.shape=(1, d_g)
        if self._use_edges:
            # All edges belong to the single graph, hence the all-zero index.
            try:
                agg_edges = self._edge_reducer(edge_attr, torch.zeros(num_edges, dtype=torch.long, device=self.device), dim=0)
            except Exception as err:
                # was a bare `except:` — keep the ValueError contract for callers
                # but narrow the catch and preserve the original cause.
                raise ValueError("reducer should be one of scatter_* [add, mul, max, min, mean]") from err
            globals_to_collect.append(agg_edges)
        if self._use_nodes:
            try:
                agg_nodes = self._node_reducer(node_attr, torch.zeros(num_nodes, dtype=torch.long, device=self.device), dim=0)
            except Exception as err:
                raise ValueError("reducer should be one of scatter_* [add, mul, max, min, mean]") from err
            globals_to_collect.append(agg_nodes)
        collected_globals = torch.cat(globals_to_collect, dim=-1)
        graph.global_attr = self.net(collected_globals)  # Update
        return graph
class EdgeBlock(nn.Module):
    """Edge block, f_e.
    Update the features of each edge based on the previous edge features,
    the features of the adjacent nodes, and the global features.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 use_edges=True,
                 use_sender_nodes=True,
                 use_receiver_nodes=True,
                 use_globals=True,
                 custom_func=None):
        super(EdgeBlock, self).__init__()
        if not (use_edges or use_sender_nodes or use_receiver_nodes or use_globals):
            raise ValueError("At least one of use_edges, use_sender_nodes, "
                             "use_receiver_nodes or use_globals must be True.")
        # Remember which feature groups feed f_e.
        self._use_edges = use_edges
        self._use_sender_nodes = use_sender_nodes
        self._use_receiver_nodes = use_receiver_nodes
        self._use_globals = use_globals
        # f_e: R^in_features -> R^out_features. A caller-supplied module
        # (ideally an nn.Sequential) replaces the default two-layer MLP.
        self.net = custom_func if custom_func else nn.Sequential(
            nn.Linear(in_features, LATENT_SIZE),
            nn.ReLU(),
            nn.Linear(LATENT_SIZE, out_features),
        )
    def forward(self, graph):
        """Recompute graph.edge_attr from the selected inputs; returns the graph."""
        node_attr, edge_index, edge_attr, global_attr = decompose_graph(graph)
        senders_idx, receivers_idx = edge_index
        num_edges = graph.num_edges
        collected = []
        if self._use_edges:
            collected.append(edge_attr)
        if self._use_sender_nodes:
            collected.append(node_attr[senders_idx, :])
        if self._use_receiver_nodes:
            collected.append(node_attr[receivers_idx, :])
        if self._use_globals:
            # Broadcast the single global vector to every edge.
            collected.append(global_attr.expand(num_edges, global_attr.shape[1]))
        graph.edge_attr = self.net(torch.cat(collected, dim=-1))  # Update
        return graph
class NodeBlock(nn.Module):
    """Node block, f_v.
    Update the features of each node based on the previous node features,
    the aggregated features of the received edges,
    the aggregated features of the sent edges, and the global features.
    """
    def __init__(self,
                 in_features,
                 out_features,
                 use_nodes=True,
                 use_sent_edges=False,
                 use_received_edges=True,
                 use_globals=True,
                 sent_edges_reducer=scatter_add,
                 received_edges_reducer=scatter_add,
                 custom_func=None):
        """Initialization of the NodeBlock module.
        Args:
            in_features: Input dimension.
                If node, 2*edge(sent, received), and global are used, d_v+(2*d_e)+d_g.
                h'_i = f_v(h_i, AGG(h_ij), AGG(h_ji), u)
            out_features: Output dimension.
                h'_i will have the dimension.
            use_nodes: Whether to condition on node attributes.
            use_sent_edges: Whether to condition on sent edges attributes.
            use_received_edges: Whether to condition on received edges attributes.
            use_globals: Whether to condition on the global attributes.
            sent_edges_reducer, received_edges_reducer: Aggregators, one of
                scatter_* [add, mul, max, min, mean].
            custom_func: optional module used as f_v instead of the default MLP.
        """
        super(NodeBlock, self).__init__()
        if not (use_nodes or use_sent_edges or use_received_edges or use_globals):
            raise ValueError("At least one of use_received_edges, use_sent_edges, "
                             "use_nodes or use_globals must be True.")
        self._use_nodes = use_nodes
        self._use_sent_edges = use_sent_edges
        self._use_received_edges = use_received_edges
        self._use_globals = use_globals
        self._sent_edges_reducer = sent_edges_reducer
        self._received_edges_reducer = received_edges_reducer
        # f_v() is a function: R^in_features -> R^out_features
        if custom_func:
            # A caller-supplied module (ideally nn.Sequential) replaces the default MLP.
            self.net = custom_func
        else:
            self.net = nn.Sequential(nn.Linear(in_features, LATENT_SIZE),
                                     nn.ReLU(),
                                     nn.Linear(LATENT_SIZE, out_features),
                                     )
    def forward(self, graph):
        """Recompute graph.x from the collected node inputs; returns the graph."""
        node_attr, edge_index, edge_attr, global_attr = decompose_graph(graph)
        senders_idx, receivers_idx = edge_index
        num_nodes = graph.num_nodes
        nodes_to_collect = []
        if self._use_nodes:
            nodes_to_collect.append(node_attr)
        if self._use_sent_edges:
            try:
                agg_sent_edges = self._sent_edges_reducer(edge_attr, senders_idx, dim=0, dim_size=num_nodes)
            except Exception as err:
                # was a bare `except:` — keep the ValueError contract for callers
                # but narrow the catch and preserve the original cause.
                raise ValueError("reducer should be one of scatter_* [add, mul, max, min, mean]") from err
            nodes_to_collect.append(agg_sent_edges)
        if self._use_received_edges:
            try:
                agg_received_edges = self._received_edges_reducer(edge_attr, receivers_idx, dim=0, dim_size=num_nodes)
            except Exception as err:
                raise ValueError("reducer should be one of scatter_* [add, mul, max, min, mean]") from err
            nodes_to_collect.append(agg_received_edges)
        if self._use_globals:
            # Broadcast the single global vector to every node.
            expanded_global_attr = global_attr.expand(num_nodes, global_attr.shape[1])
            nodes_to_collect.append(expanded_global_attr)
        collected_nodes = torch.cat(nodes_to_collect, dim=-1)
        graph.x = self.net(collected_nodes)  # Update
        return graph
class NodeBlockInd(NodeBlock):
    """Node-level feature transformation.
    Each node is considered independently. (No edge is considered.)
    Args:
        in_features: input dimension of node representations.
        out_features: output dimension of node representations.
                      (node embedding size)
    (N^v, d_v) -> (N^v, out_features)
    NodeBlockInd(graph) -> updated graph
    """
    def __init__(self,
                 in_features,
                 out_features,
                 hidden_features=32,
                 custom_func=None):
        super(NodeBlockInd, self).__init__(in_features,
                                           out_features,
                                           use_nodes=True,
                                           use_sent_edges=False,
                                           use_received_edges=False,
                                           use_globals=False,
                                           sent_edges_reducer=None,
                                           received_edges_reducer=None,
                                           custom_func=custom_func)
        # Customized function
        # NOTE(review): this re-assignment overrides whatever self.net the base
        # __init__ configured; only the hidden layer size differs here.
        if custom_func:
            # Customized function can be used for self.net instead of the default function.
            # It is highly recommended to use an nn.Sequential() type.
            self.net = custom_func
        else:
            self.hidden_features = hidden_features
            self.net = nn.Sequential(nn.Linear(in_features, self.hidden_features),
                                     nn.ReLU(),
                                     nn.Linear(self.hidden_features, out_features),
                                     )
class EdgeBlockInd(EdgeBlock):
    """Edge-level feature transformation.
    Each edge is considered independently. (No node is considered.)
    Args:
        in_features: input dimension of edge representations.
        out_features: output dimension of edge representations.
                      (edge embedding size)
    (N^e, d_e) -> (N^e, out_features)
    EdgeBlockInd(graph) -> updated graph
    """
    def __init__(self,
                 in_features,
                 out_features,
                 hidden_features=32,
                 custom_func=None):
        super(EdgeBlockInd, self).__init__(in_features,
                                           out_features,
                                           use_edges=True,
                                           use_sender_nodes=False,
                                           use_receiver_nodes=False,
                                           use_globals=False,
                                           custom_func=custom_func)
        # Customized function
        # NOTE(review): this re-assignment overrides whatever self.net the base
        # __init__ configured; only the hidden layer size differs here.
        if custom_func:
            # Customized function can be used for self.net instead of the default function.
            # It is highly recommended to use an nn.Sequential() type.
            self.net = custom_func
        else:
            self.hidden_features = hidden_features
            self.net = nn.Sequential(nn.Linear(in_features, self.hidden_features),
                                     nn.ReLU(),
                                     nn.Linear(self.hidden_features, out_features),
                                     )
class GlobalBlockInd(GlobalBlock):
    """Global-level feature transformation; no edge or node data is used.

    Maps the global feature vector of shape (1, in_features) to
    (1, out_features).

    Args:
        in_features: input dimension of global representations.
        out_features: output dimension of global representations
            (global embedding size).
        hidden_features: hidden-layer width of the default MLP.
        custom_func: optional module used as ``self.net`` instead of the
            default MLP (an ``nn.Sequential`` is highly recommended).
    """
    def __init__(self,
                 in_features,
                 out_features,
                 hidden_features=32,
                 custom_func=None):
        super(GlobalBlockInd, self).__init__(
            in_features,
            out_features,
            use_edges=False,
            use_nodes=False,
            use_globals=True,
            edge_reducer=None,
            node_reducer=None,
            custom_func=custom_func)
        if not custom_func:
            # Default transformation: Linear -> ReLU -> Linear.
            self.hidden_features = hidden_features
            self.net = nn.Sequential(
                nn.Linear(in_features, hidden_features),
                nn.ReLU(),
                nn.Linear(hidden_features, out_features),
            )
        else:
            # A user-supplied module replaces the default network.
            self.net = custom_func
| 9,996 | 0 | 236 |
72d4283e9b50b9a3c2f901684448ebea9b89084c | 1,860 | py | Python | cattledb/settings/default.py | reiterd/cattledb | 015214afa5c3b1e94b555b138334163068aaf982 | [
"MIT"
] | null | null | null | cattledb/settings/default.py | reiterd/cattledb | 015214afa5c3b1e94b555b138334163068aaf982 | [
"MIT"
] | null | null | null | cattledb/settings/default.py | reiterd/cattledb | 015214afa5c3b1e94b555b138334163068aaf982 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# coding: utf-8
import logging
import os
from cattledb.core.models import MetricDefinition, EventDefinition, MetricType, EventSeriesType
# Metric series available to the database.
# MetricDefinition args appear to be (name, short_id, type, delete_possible)
# — confirm against cattledb.core.models.MetricDefinition.
METRICS = [
    MetricDefinition("test", "test", MetricType.FLOATSERIES, True),
    # Raw Metrics
    MetricDefinition("rawph", "rph", MetricType.FLOATSERIES, False),
    MetricDefinition("adcph", "aph", MetricType.FLOATSERIES, False),
    MetricDefinition("rawtemp", "rtp", MetricType.FLOATSERIES, False),
    MetricDefinition("adctemp", "atp", MetricType.FLOATSERIES, False),
    MetricDefinition("rawact", "rac", MetricType.FLOATSERIES, False),
    MetricDefinition("rawhum", "rhu", MetricType.FLOATSERIES, False),
    # Stage 1
    MetricDefinition("ph", "ph", MetricType.FLOATSERIES, True),
    MetricDefinition("temp", "tmp", MetricType.FLOATSERIES, True),
    MetricDefinition("act", "act", MetricType.FLOATSERIES, True),
    MetricDefinition("hum", "hum", MetricType.FLOATSERIES, True),
    MetricDefinition("act_index", "aci", MetricType.FLOATSERIES, True),
    MetricDefinition("rawphuncorrected", "uph", MetricType.FLOATSERIES, True)
]
# Event series definitions; the trailing '*' name is presumably a
# wildcard/prefix pattern — verify against EventDefinition semantics.
EVENTS = [
    EventDefinition("test_daily", EventSeriesType.DAILY),
    EventDefinition("test_monthly", EventSeriesType.MONTHLY),
    EventDefinition("test_monthly_*", EventSeriesType.MONTHLY)
]
# Global runtime flags.
TESTING = False
DEBUG = False
# Storage engine selection and its connection options.
ENGINE = "bigtable"
ENGINE_OPTIONS = {
    "credentials": None,
    "project_id": "proj1",
    "instance_id": "inst1"
}
READ_ONLY = False
ADMIN = True
POOL_SIZE = 10
TABLE_PREFIX = "mycdb"
# Standard dictConfig-style logging configuration: INFO to stdout.
LOGGING_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "INFO",
            "stream": "ext://sys.stdout"
        }
    },
    "root": {
        "level": "INFO",
        "handlers": ["console"]
    }
}
| 26.197183 | 95 | 0.670968 | #!/usr/bin/python
# coding: utf-8
import logging
import os
from cattledb.core.models import MetricDefinition, EventDefinition, MetricType, EventSeriesType
METRICS = [
MetricDefinition("test", "test", MetricType.FLOATSERIES, True),
# Raw Metrics
MetricDefinition("rawph", "rph", MetricType.FLOATSERIES, False),
MetricDefinition("adcph", "aph", MetricType.FLOATSERIES, False),
MetricDefinition("rawtemp", "rtp", MetricType.FLOATSERIES, False),
MetricDefinition("adctemp", "atp", MetricType.FLOATSERIES, False),
MetricDefinition("rawact", "rac", MetricType.FLOATSERIES, False),
MetricDefinition("rawhum", "rhu", MetricType.FLOATSERIES, False),
# Stage 1
MetricDefinition("ph", "ph", MetricType.FLOATSERIES, True),
MetricDefinition("temp", "tmp", MetricType.FLOATSERIES, True),
MetricDefinition("act", "act", MetricType.FLOATSERIES, True),
MetricDefinition("hum", "hum", MetricType.FLOATSERIES, True),
MetricDefinition("act_index", "aci", MetricType.FLOATSERIES, True),
MetricDefinition("rawphuncorrected", "uph", MetricType.FLOATSERIES, True)
]
EVENTS = [
EventDefinition("test_daily", EventSeriesType.DAILY),
EventDefinition("test_monthly", EventSeriesType.MONTHLY),
EventDefinition("test_monthly_*", EventSeriesType.MONTHLY)
]
TESTING = False
DEBUG = False
ENGINE = "bigtable"
ENGINE_OPTIONS = {
"credentials": None,
"project_id": "proj1",
"instance_id": "inst1"
}
READ_ONLY = False
ADMIN = True
POOL_SIZE = 10
TABLE_PREFIX = "mycdb"
LOGGING_CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "INFO",
"stream": "ext://sys.stdout"
}
},
"root": {
"level": "INFO",
"handlers": ["console"]
}
}
| 0 | 0 | 0 |
8465348662c1eb13168394b70fa2d1f7c261ac99 | 39 | py | Python | banana_dev/__init__.py | johnpaulbin/banana | dec464b9e82250e166162768a43274647b0c9d8e | [
"MIT"
] | null | null | null | banana_dev/__init__.py | johnpaulbin/banana | dec464b9e82250e166162768a43274647b0c9d8e | [
"MIT"
] | null | null | null | banana_dev/__init__.py | johnpaulbin/banana | dec464b9e82250e166162768a43274647b0c9d8e | [
"MIT"
] | null | null | null | from .package import run, start, check
| 19.5 | 38 | 0.769231 | from .package import run, start, check
| 0 | 0 | 0 |
fe7be313ace6d9b1b988232cc05a59fb6163639c | 2,302 | py | Python | army_ant/index/text_index.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
] | 5 | 2018-01-18T14:11:52.000Z | 2020-10-23T16:02:25.000Z | army_ant/index/text_index.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
] | 10 | 2018-02-02T20:19:36.000Z | 2020-10-05T08:46:36.000Z | army_ant/index/text_index.py | feup-infolab/army-ant | 7b33120d5160f73d7a41a05e6336489c917fb75c | [
"BSD-3-Clause"
] | null | null | null | import csv
import logging
import os
from enum import Enum
from army_ant.util.text import textrank
from . import Index
logger = logging.getLogger(__name__)
| 33.362319 | 113 | 0.590791 | import csv
import logging
import os
from enum import Enum
from army_ant.util.text import textrank
from . import Index
logger = logging.getLogger(__name__)
class TextIndex(Index):
    """CSV/TSV-backed text index that optionally stores TextRank keywords."""
    class Feature(Enum):
        # Optional indexing features selectable via the constructor.
        keywords = 'EXTRACT_KEYWORDS'
    def __init__(self, reader, index_location, index_features, loop):
        # Resolve feature names to Feature members and prepare the index dir.
        super().__init__(reader, index_location, loop)
        self.index_features = [TextIndex.Feature[index_feature] for index_feature in index_features]
        self.index_filename = os.path.join(self.index_location, "index.csv")
        os.makedirs(self.index_location, exist_ok=True)
    async def index(self, features_location=None):
        # Async generator: writes one TSV row per document (doc_id, text or
        # '||'-joined keywords) and yields each processed document.
        #
        # NOTE(review): `resume` is read and logged below but never used to
        # actually skip documents — confirm whether resuming is implemented.
        if TextIndex.Feature.keywords in self.index_features:
            logger.info("Indexing top %.0f%% keywords per document based on TextRank" % (Index.KW_RATIO * 100))
        resume = None
        if features_location:
            path = os.path.join(features_location, "resume")
            if os.path.exists(path):
                with open(path) as fp:
                    resume = int(fp.read())
                logger.info("Skipping to document %d to resume collection processing" % resume)
        count = 0
        with open(self.index_filename, 'w') as fp:
            csv_writer = csv.writer(fp, delimiter='\t')
            # Header depends on whether keyword extraction is enabled.
            if TextIndex.Feature.keywords in self.index_features:
                csv_writer.writerow(['doc_id', 'keywords'])
            else:
                csv_writer.writerow(['doc_id', 'text'])
            for doc in self.reader:
                count += 1
                if count % 1000 == 0:
                    logger.info("%d documents read" % count)
                if not doc.doc_id:
                    logger.warning("Document %d does not have a 'doc_id', skipping" % count)
                    continue
                if TextIndex.Feature.keywords in self.index_features:
                    # Replace the full text with its TextRank keyword list.
                    doc.text = '||'.join(textrank(doc.text, ratio=Index.KW_RATIO, as_list=True))
                if not doc.text:
                    logger.warning("Document %d (%s) does not have a text block, skipping" % (count, doc.doc_id))
                    continue
                csv_writer.writerow([doc.doc_id, doc.text])
                yield doc
        logger.info("%d documents read" % count)
6f51b3a31c99860d61d9d4efb0919491a6bb36b2 | 7,614 | py | Python | src/graph_transpiler/webdnn/backend/webgpu/kernels/im2col.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | 1 | 2018-07-26T13:52:21.000Z | 2018-07-26T13:52:21.000Z | src/graph_transpiler/webdnn/backend/webgpu/kernels/im2col.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/backend/webgpu/kernels/im2col.py | gunpowder78/webdnn | c659ea49007f91d178ce422a1eebe289516a71ee | [
"MIT"
] | null | null | null | from typing import List
from webdnn.backend.code_generator.allocator import MemoryLayout
from webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgpu.generator import WebGPUDescriptorGenerator
from webdnn.backend.webgpu.kernel import GPUSize, Kernel
from webdnn.graph.axis import Axis
from webdnn.graph.operators.im2col import Im2Col
from webdnn.graph.order import OrderNHWC, OrderCNHW
template_CNHW = """
kernel void %%FUNC_NAME%%(device float * %%STATIC_BUFFER%%[[buffer(0)]],
device float * %%DYNAMIC_BUFFER%%[[buffer(1)]],
const device int * %%META_BUFFER%% [[buffer(2)]],
uint index[[thread_position_in_grid]],
uint num_threads[[threads_per_grid]])
{
const device float *im = %%LOAD_BUFFER(im2col_im)%%;
device float *col = %%LOAD_BUFFER(im2col_col)%%;
const int N = %%LOAD_BUFFER(im2col_N)%%;
const int C1 = %%LOAD_BUFFER(im2col_C1)%%;
const int H1 = %%LOAD_BUFFER(im2col_H1)%%;
const int W1 = %%LOAD_BUFFER(im2col_W1)%%;
const int H2 = %%LOAD_BUFFER(im2col_H2)%%;
const int W2 = %%LOAD_BUFFER(im2col_W2)%%;
const int KH = %%LOAD_BUFFER(im2col_KH)%%;
const int KW = %%LOAD_BUFFER(im2col_KW)%%;
const int DH = %%LOAD_BUFFER(im2col_DH)%%;
const int DW = %%LOAD_BUFFER(im2col_DW)%%;
const int SH = %%LOAD_BUFFER(im2col_SH)%%;
const int SW = %%LOAD_BUFFER(im2col_SW)%%;
const int PH = %%LOAD_BUFFER(im2col_PH)%%;
const int PW = %%LOAD_BUFFER(im2col_PW)%%;
for (int gid = index; gid < N*H2*W2*KH*KW*C1; gid += num_threads) {
const int w2 = gid % W2;
const int h2 = gid / W2 % H2;
const int n = gid / W2 / H2 % N;
const int c1 = gid / W2 / H2 / N % C1;
const int kw = gid / W2 / H2 / N / C1 % KW;
const int kh = gid / W2 / H2 / N / C1 / KW;
const int h1 = h2 * SH - PH + kh * DH;
const int w1 = w2 * SW - PW + kw * DW;
col[gid] = (h1 < 0 || h1 >= H1 || w1 < 0 || w1 >= W1) ? 0 : im[((n*H1+h1)*W1+w1)*C1+c1];
}
}
"""
@WebGPUDescriptorGenerator.register_handler(Im2Col)
| 33.104348 | 120 | 0.57959 | from typing import List
from webdnn.backend.code_generator.allocator import MemoryLayout
from webdnn.backend.code_generator.injectors.buffer_injector import BufferInjector
from webdnn.backend.code_generator.injectors.kernel_name_injector import KernelNameInjector
from webdnn.backend.webgpu.generator import WebGPUDescriptorGenerator
from webdnn.backend.webgpu.kernel import GPUSize, Kernel
from webdnn.graph.axis import Axis
from webdnn.graph.operators.im2col import Im2Col
from webdnn.graph.order import OrderNHWC, OrderCNHW
def generate_template_NHWC(SH, SW, DH, DW, C1):
    """Build the Metal kernel source for im2col with NHWC output order.

    The stride (SH/SW), dilation (DH/DW) and channel-divisibility flags are
    baked into the shader as #define constants so the Metal preprocessor can
    strip divisions and enable float4 vectorized loads when possible.

    NOTE(review): the inner kw loop below is guarded by SH_EQUAL_1 — it looks
    like it should be SW_EQUAL_1 (the strided variant divides by SW); confirm
    against the upstream webdnn sources before changing.
    """
    # 1/0 flags consumed by the shader's #if blocks.
    SH_EQUAL_1 = 1 if SH == 1 else 0
    SW_EQUAL_1 = 1 if SW == 1 else 0
    DH_EQUAL_1 = 1 if DH == 1 else 0
    DW_EQUAL_1 = 1 if DW == 1 else 0
    C1_DIVIDABLE_BY_4 = 1 if C1 % 4 == 0 else 0
    return f"""
kernel void %%FUNC_NAME%%(device float * %%STATIC_BUFFER%%[[buffer(0)]],
                          device float * %%DYNAMIC_BUFFER%%[[buffer(1)]],
                          const device int * %%META_BUFFER%% [[buffer(2)]],
                          uint index_thread[[thread_position_in_threadgroup]],
                          uint index_group[[threadgroup_position_in_grid]])
{{
#define SH_EQUAL_1 {SH_EQUAL_1}
#define SW_EQUAL_1 {SW_EQUAL_1}
#define DH_EQUAL_1 {DH_EQUAL_1}
#define DW_EQUAL_1 {DW_EQUAL_1}
#define C1_DIVIDABLE_BY_4 {C1_DIVIDABLE_BY_4}
#if OPTIMIZE && C1_DIVIDABLE_BY_4
    const device float4 *im4 = (const device float4 *)(%%LOAD_BUFFER(im2col_im)%%);
    device float4 *col4 = (device float4 *)(%%LOAD_BUFFER(im2col_col)%%);
    const int C1_4 = (%%LOAD_BUFFER(im2col_C1)%%) >> 2;
#else
    const device float *im = %%LOAD_BUFFER(im2col_im)%%;
    device float *col = %%LOAD_BUFFER(im2col_col)%%;
    const int C1 = %%LOAD_BUFFER(im2col_C1)%%;
#endif
    const int H1 = %%LOAD_BUFFER(im2col_H1)%%;
    const int W1 = %%LOAD_BUFFER(im2col_W1)%%;
    const int H2 = %%LOAD_BUFFER(im2col_H2)%%;
    const int W2 = %%LOAD_BUFFER(im2col_W2)%%;
    const int KH = %%LOAD_BUFFER(im2col_KH)%%;
    const int KW = %%LOAD_BUFFER(im2col_KW)%%;
#if !DH_EQUAL_1
    const int DH = %%LOAD_BUFFER(im2col_DH)%%;
#endif
#if !DW_EQUAL_1
    const int DW = %%LOAD_BUFFER(im2col_DW)%%;
#endif
    const int PH = %%LOAD_BUFFER(im2col_PH)%%;
    const int PW = %%LOAD_BUFFER(im2col_PW)%%;
#if !OPTIMIZE || !SH_EQUAL_1
    const int SH = %%LOAD_BUFFER(im2col_SH)%%;
#endif
#if !OPTIMIZE || !SW_EQUAL_1
    const int SW = %%LOAD_BUFFER(im2col_SW)%%;
#endif
    const int H1P = H1 + 2 * PH;
    const int W1P = W1 + 2 * PW;
    const int w1 = (index_group % W1P) - PW;
    const int h1 = (index_group / W1P % H1P) - PH;
    const int n = index_group / W1P / H1P;
#if OPTIMIZE && C1_DIVIDABLE_BY_4
    for (int c1_4 = index_thread; c1_4 < C1_4; c1_4 += 64) {{
        const float4 v4 = (h1 < 0 || h1 >= H1 || w1 < 0 || w1 >= W1) ? 0 : im4[((n * H1 + h1) * W1 + w1) * C1_4 + c1_4];
#else
    for (int c1 = index_thread; c1 < C1; c1 += 64) {{
        const float v = (h1 < 0 || h1 >= H1 || w1 < 0 || w1 >= W1) ? 0 : im[((n * H1 + h1) * W1 + w1) * C1 + c1];
#endif
#if OPTIMIZE && SH_EQUAL_1
        for (int kh = 0; kh < KH; kh++) {{
#if DH_EQUAL_1
            const int h2 = h1 + PH - kh;
#else
            const int h2 = h1 + PH - kh * DH;
#endif
#else
        for (int kh = (h1 + PH) % SH; kh < KH; kh += SH) {{
#if DH_EQUAL_1
            const int h2 = (h1 + PH - kh) / SH;
#else
            const int h2 = (h1 + PH - kh * DH) / SH;
#endif
#endif
            if (h2 < 0 || h2 >= H2) continue;
#if OPTIMIZE && SH_EQUAL_1
            for (int kw = 0; kw < KW; kw++) {{
#if DW_EQUAL_1
                const int w2 = w1 + PW - kw;
#else
                const int w2 = w1 + PW - kw * DW;
#endif
#else
            for (int kw = (w1 + PW) % SW; kw < KW; kw += SW) {{
#if DW_EQUAL_1
                const int w2 = (w1 + PW - kw) / SW;
#else
                const int w2 = (w1 + PW - kw * DW) / SW;
#endif
#endif
                if (w2 < 0 || w2 >= W2) continue;
#if OPTIMIZE && C1_DIVIDABLE_BY_4
                col4[((((n * H2 + h2) * W2 + w2) * KH + kh) * KW + kw) * C1_4 + c1_4] = v4;
#else
                col[((((n * H2 + h2) * W2 + w2) * KH + kh) * KW + kw) * C1 + c1] = v;
#endif
            }}
        }}
    }}
#undef SH_EQUAL_1
#undef SW_EQUAL_1
#undef DH_EQUAL_1
#undef DW_EQUAL_1
#undef C1_DIVIDABLE_BY_4
}}
"""
template_CNHW = """
kernel void %%FUNC_NAME%%(device float * %%STATIC_BUFFER%%[[buffer(0)]],
device float * %%DYNAMIC_BUFFER%%[[buffer(1)]],
const device int * %%META_BUFFER%% [[buffer(2)]],
uint index[[thread_position_in_grid]],
uint num_threads[[threads_per_grid]])
{
const device float *im = %%LOAD_BUFFER(im2col_im)%%;
device float *col = %%LOAD_BUFFER(im2col_col)%%;
const int N = %%LOAD_BUFFER(im2col_N)%%;
const int C1 = %%LOAD_BUFFER(im2col_C1)%%;
const int H1 = %%LOAD_BUFFER(im2col_H1)%%;
const int W1 = %%LOAD_BUFFER(im2col_W1)%%;
const int H2 = %%LOAD_BUFFER(im2col_H2)%%;
const int W2 = %%LOAD_BUFFER(im2col_W2)%%;
const int KH = %%LOAD_BUFFER(im2col_KH)%%;
const int KW = %%LOAD_BUFFER(im2col_KW)%%;
const int DH = %%LOAD_BUFFER(im2col_DH)%%;
const int DW = %%LOAD_BUFFER(im2col_DW)%%;
const int SH = %%LOAD_BUFFER(im2col_SH)%%;
const int SW = %%LOAD_BUFFER(im2col_SW)%%;
const int PH = %%LOAD_BUFFER(im2col_PH)%%;
const int PW = %%LOAD_BUFFER(im2col_PW)%%;
for (int gid = index; gid < N*H2*W2*KH*KW*C1; gid += num_threads) {
const int w2 = gid % W2;
const int h2 = gid / W2 % H2;
const int n = gid / W2 / H2 % N;
const int c1 = gid / W2 / H2 / N % C1;
const int kw = gid / W2 / H2 / N / C1 % KW;
const int kh = gid / W2 / H2 / N / C1 / KW;
const int h1 = h2 * SH - PH + kh * DH;
const int w1 = w2 * SW - PW + kw * DW;
col[gid] = (h1 < 0 || h1 >= H1 || w1 < 0 || w1 >= W1) ? 0 : im[((n*H1+h1)*W1+w1)*C1+c1];
}
}
"""
@WebGPUDescriptorGenerator.register_handler(Im2Col)
def im2col(op: Im2Col, memory_layout: MemoryLayout) -> List[Kernel]:
    """Generate the WebGPU (Metal) kernel implementing an Im2Col operator.

    Picks the CNHW or NHWC shader template based on the output order,
    injects buffer offsets and operator parameters, and wraps the result
    in a single Kernel dispatched over the padded input plane.
    """
    im = op.inputs["im"]
    col = op.outputs["col"]
    # Only NHWC input and NHWC/CNHW output layouts are supported.
    assert im.order == OrderNHWC
    assert col.order == OrderNHWC or col.order == OrderCNHW
    N = im.shape_dict[Axis.N]
    C1 = im.shape_dict[Axis.C]
    H1 = im.shape_dict[Axis.H]
    W1 = im.shape_dict[Axis.W]
    # Padded input height/width; one threadgroup per padded pixel below.
    H1P = H1 + 2 * op.PH
    W1P = W1 + 2 * op.PW
    buffer_injector = BufferInjector()
    buffer_injector.register({
        "im2col_im": memory_layout[im],
        "im2col_col": memory_layout[col],
        "im2col_N": N,
        "im2col_C1": C1,
        "im2col_H1": im.shape_dict[Axis.H],
        "im2col_W1": im.shape_dict[Axis.W],
        "im2col_H2": col.shape_dict[Axis.H],
        "im2col_W2": col.shape_dict[Axis.W],
        "im2col_KH": op.KH,
        "im2col_KW": op.KW,
        "im2col_DH": op.DH,
        "im2col_DW": op.DW,
        "im2col_SH": op.SH,
        "im2col_SW": op.SW,
        "im2col_PH": op.PH,
        "im2col_PW": op.PW,
    })
    name_injector = KernelNameInjector(op)
    # NHWC uses a specialized template with stride/dilation baked in.
    source = template_CNHW if col.order == OrderCNHW else generate_template_NHWC(op.SH, op.SW, op.DH, op.DW, C1)
    source = buffer_injector.inject(source)
    source = name_injector.inject(source)
    kernel = Kernel(
        {name_injector.name: source},
        name_injector.name,
        GPUSize(N * H1P * W1P, 1, 1),
        GPUSize(64, 1, 1),
        buffer_injector.buffer,
        buffer_injector.unresolved_value_list
    )
    return [kernel]
| 5,301 | 0 | 45 |
46243340464295c5454d70c073cd34a19661a3e8 | 1,049 | py | Python | demoslogic/users/models.py | amstart/demoslogic | 059575b502c21f8f27c66a26abee9a42fcb788b7 | [
"MIT"
] | null | null | null | demoslogic/users/models.py | amstart/demoslogic | 059575b502c21f8f27c66a26abee9a42fcb788b7 | [
"MIT"
] | 3 | 2021-06-08T20:04:58.000Z | 2022-03-11T23:26:36.000Z | demoslogic/users/models.py | amstart/demoslogic | 059575b502c21f8f27c66a26abee9a42fcb788b7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import datetime
from django.contrib.auth.models import AbstractUser
from django.urls import reverse
from django.db import models
from six import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
@python_2_unicode_compatible
| 32.78125 | 100 | 0.717827 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
import datetime
from django.contrib.auth.models import AbstractUser
from django.urls import reverse
from django.db import models
from six import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
@python_2_unicode_compatible
class User(AbstractUser):
    """Custom user model with a free-form display name and a staging time."""

    # First Name and Last Name do not cover name patterns
    # around the globe, so a single free-form field is used instead.
    name = models.CharField(_('Name of User'), blank=True, max_length=255)
    # When the user entered the "staged" state; unset for fresh accounts.
    staged = models.DateTimeField(null=True, blank=True)

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original called super(AbstractUser, self).__init__,
        # which starts the MRO lookup *after* AbstractUser and would skip
        # any __init__ defined on it; use the zero-argument form instead.
        super().__init__(*args, **kwargs)
        # Auto-stage users that joined more than one day ago.
        # NOTE(review): this triggers a save() during model instantiation
        # (including queryset iteration) — confirm this side effect is wanted.
        if self.staged is None and self.date_joined < timezone.now() - datetime.timedelta(days=1):
            self.staged = timezone.now()
            self.save()

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        """Return the canonical detail-page URL for this user."""
        return reverse('users:detail', kwargs={'username': self.username})
| 347 | 304 | 22 |
e0ed7170031de647fbca6e14e595dd87d7a542cf | 5,213 | py | Python | SentimentAnalysis.py | balbidatascience/demo-analisesentimento | 83e57b3744964dca166e9ed56ac537444bb14d31 | [
"MIT"
] | null | null | null | SentimentAnalysis.py | balbidatascience/demo-analisesentimento | 83e57b3744964dca166e9ed56ac537444bb14d31 | [
"MIT"
] | null | null | null | SentimentAnalysis.py | balbidatascience/demo-analisesentimento | 83e57b3744964dca166e9ed56ac537444bb14d31 | [
"MIT"
] | null | null | null | import nltk
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
#nltk.download('punkt')
from textblob import TextBlob
from DataLake import Mongo
obj = SentimentAnalysis()
obj.test()
#print(obj.cleanTweet('😂😂😂😬👀🙄👹😍😜😎 Gabriel é lindo'))
| 33.850649 | 116 | 0.549971 | import nltk
import re
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.model_selection import cross_val_predict
#nltk.download('punkt')
from textblob import TextBlob
from DataLake import Mongo
class SentimentAnalysis:
    """Sentiment analysis over tweets: a Naive Bayes classifier trained on a
    labeled CSV, plus TextBlob-based per-sentence polarity (with translation
    to English when needed).

    NOTE(review): the training data is loaded at class-definition time
    (import side effect) — confirm 'data/Tweets_Mg.csv' is always present.
    """
    # Labeled training corpus: raw tweet text and its sentiment class.
    df_tweet = pd.read_csv('data/Tweets_Mg.csv')
    tweets = df_tweet['Text'].values
    classes = df_tweet['Classificacao'].values
    # Matches common emoji codepoint ranges for removal during cleaning.
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               "]+", flags=re.UNICODE)
    def TrainModel(self):
        """Train a unigram bag-of-words Multinomial Naive Bayes model.

        Returns the fitted (vectorizer, model) pair. Note: retrains on
        every call — there is no caching.
        """
        vectorizer = CountVectorizer(analyzer="word")
        freq_tweets = vectorizer.fit_transform(self.tweets)
        modelo = MultinomialNB()
        modelo.fit(freq_tweets, self.classes)
        return vectorizer, modelo
    def TrainModel2(self):
        """Same as TrainModel but with unigram+bigram features."""
        vectorizer2 = CountVectorizer(ngram_range=(1, 2))
        freq_tweets = vectorizer2.fit_transform(self.tweets)
        modelo2 = MultinomialNB()
        modelo2.fit(freq_tweets, self.classes)
        return vectorizer2, modelo2
    def Predict(self, tweet):
        """Classify an iterable of tweet strings with the unigram model."""
        vec, model = self.TrainModel()
        freq_testes = vec.transform(tweet)
        return {'tweet': tweet,
                'result': model.predict(freq_testes)}
    def Predict2(self, tweet):
        """Classify an iterable of tweet strings with the bigram model."""
        vec, model = self.TrainModel2()
        freq_testes = vec.transform(tweet)
        return {'tweet': tweet,
                'result': model.predict(freq_testes)}
    def ComparePredict(self, tweet):
        """Intended to compare both models; currently only model 1 is used.

        NOTE(review): `model.predict(...)` returns a numpy array, which has
        no `.values` attribute — this line likely raises AttributeError.
        `vec2`/`model2` are trained but unused.
        """
        vec, model = self.TrainModel()
        vec2, model2 = self.TrainModel2()
        freq_testes = vec.transform(tweet)
        # freq_testes2 = vec2.transform(tweet)
        return {'tweet': tweet,
                'result': model.predict(freq_testes).values}
    def isReTweet(self, tweet):
        """Return True when the tweet text starts with 'RT' (a retweet)."""
        # Ignore retweets
        if re.match(r'^RT.*', tweet):
            return True
        else:
            return False
    def cleanTweet(self, tweet):
        """Lower-case the tweet, strip URLs/emojis and expand slang."""
        tweet = str(tweet).lower()
        # Remove URLS. (I stole this regex from the internet.)
        tweet = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', tweet)
        # Expand common English abbreviations/slang to full words.
        tweet = re.sub(r'\bthats\b', 'that is', tweet)
        tweet = re.sub(r'\bive\b', 'i have', tweet)
        tweet = re.sub(r'\bim\b', 'i am', tweet)
        tweet = re.sub(r'\bya\b', 'yeah', tweet)
        tweet = re.sub(r'\bcant\b', 'can not', tweet)
        tweet = re.sub(r'\bwont\b', 'will not', tweet)
        tweet = re.sub(r'\bid\b', 'i would', tweet)
        tweet = re.sub(r'wtf', 'what the fuck', tweet)
        tweet = re.sub(r'\bwth\b', 'what the hell', tweet)
        tweet = re.sub(r'\br\b', 'are', tweet)
        tweet = re.sub(r'\bu\b', 'you', tweet)
        tweet = re.sub(r'\bk\b', 'OK', tweet)
        tweet = re.sub(r'\bsux\b', 'sucks', tweet)
        tweet = re.sub(r'\bno+\b', 'no', tweet)
        tweet = re.sub(r'\bcoo+\b', 'cool', tweet)
        # remove emojis
        tweet = self.emoji_pattern.sub(r'', tweet)
        return tweet
    # Using the TextBlob package; translating to English was necessary.
    def getSentimentAnalysis(self, tweet):
        """Print overall and per-sentence TextBlob sentiment for a tweet.

        Non-English text is translated to English first (network call).
        Always returns True; output goes to stdout only.
        """
        # Verify if retweet
        # print(self.isReTweet(str(tweet)))
        text = self.cleanTweet(tweet)
        textBlod = TextBlob(text)
        frase = textBlod.sentences
        print('------------------------------------------------------------------')
        print('Antes: {0}'.format(tweet))
        print('Depoi: {0}'.format(text))
        print('------- ------------- -----------------')
        # Overall sentiment (translate first when not already English).
        if textBlod.detect_language() != 'en':
            trad = TextBlob(str(textBlod.translate(to='en')))
            print('Sentimento Geral: {0}'.format(trad.sentiment))
        else:
            print('Sentimento Geral: {0}'.format(textBlod.sentiment))
        print('\n')
        # Per-sentence sentiment, translating sentence by sentence.
        for sentence in frase:
            if sentence.detect_language() != 'en':
                traducao = TextBlob(str(sentence.translate(to='en')))
                print('Frase: {0} - Sentimento: {1}'.format(traducao, traducao.sentiment))
            else:
                print('Frase: {0} - Sentimento: {1}'.format(sentence, sentence.sentiment))
        print('------------------------------------------------------------------')
        #if frase.detect_language() != 'en':
        #    traducao = TextBlob(str(frase.translate(to='en')))
        #    print('Tweet: {0} - Sentimento: {1}'.format(tweet, traducao.sentiment))
        #else:
        #    print('Tweet: {0} - Sentimento: {1}'.format(tweet, frase.sentiment))
        return True
    def test(self):
        """Run sentiment analysis over every tweet stored in MongoDB."""
        db = Mongo()
        tweets = db.listTweets()
        for tweet in tweets:
            self.getSentimentAnalysis(tweet['text'])
# Module-level driver: analyze all stored tweets on import/run.
obj = SentimentAnalysis()
obj.test()
#print(obj.cleanTweet('😂😂😂😬👀🙄👹😍😜😎 Gabriel é lindo'))
| 3,928 | 856 | 23 |
000ca440049f8d83381e7ba88188f82ac14455c1 | 717 | py | Python | index.py | lidalei/VisAss | 34cc5e6f8ad2f6962dd60e99849f987d8cf7b0d6 | [
"MIT"
] | null | null | null | index.py | lidalei/VisAss | 34cc5e6f8ad2f6962dd60e99849f987d8cf7b0d6 | [
"MIT"
] | null | null | null | index.py | lidalei/VisAss | 34cc5e6f8ad2f6962dd60e99849f987d8cf7b0d6 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, Markup, jsonify
import json, time
app = Flask(__name__)
@app.route('/')
@app.route('/heatmap')
@app.route('/parallel-coordinates')
@app.route('/getArticlesByAuthor/<author_name>')
if __name__ == '__main__':
app.run(port = 5000)
| 25.607143 | 107 | 0.728033 | from flask import Flask, render_template, request, Markup, jsonify
import json, time
app = Flask(__name__)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/heatmap')
def heatmap():
return render_template('heat_map.html')
@app.route('/parallel-coordinates')
def parallel_coordinates():
return render_template('parallel_coordinates.html')
@app.route('/getArticlesByAuthor/<author_name>')
def get_articles_by_author(author_name = 'Xiaoou Tang'):
nearest_author = "Dalei Li"
articles_html = "<p>Hi~</p>"
return render_template('articles.html', author_name = nearest_author, articles = Markup(articles_html))
if __name__ == '__main__':
app.run(port = 5000)
| 338 | 0 | 88 |
6a8389c66ee5acfd26f7de44c21cc66038ceacce | 9,031 | py | Python | mcw/meson.py | kicsyromy/meson-cmake-wrapper | bb7316ec38461e96ad2354a3a034b132dca64db3 | [
"MIT"
] | null | null | null | mcw/meson.py | kicsyromy/meson-cmake-wrapper | bb7316ec38461e96ad2354a3a034b132dca64db3 | [
"MIT"
] | null | null | null | mcw/meson.py | kicsyromy/meson-cmake-wrapper | bb7316ec38461e96ad2354a3a034b132dca64db3 | [
"MIT"
] | null | null | null | import os
import json
import subprocess
import logging
from .ninja import NinjaBackend
class Meson:
"""
Base class that handles data fetching and setting options for Meson.
"""
| 35.695652 | 131 | 0.576459 | import os
import json
import subprocess
import logging
from .ninja import NinjaBackend
class Meson:
"""
Base class that handles data fetching and setting options for Meson.
"""
def __init__(self, path='meson'):
self.path = path
self.backend = None
self.build_dir = None
self.source_dir = None
self.build_type = None
self.cross_file = None
# Cache
self.c_version = None
self.c_project_name = None
self.c_targets = None
self.c_target_files = {}
self.c_buildsystem_files = None
self.c_project_info = None
self.c_compile_commands = None
self.c_compile_commands_target = {}
self.c_default_inc_dirs = {}
self.logger = logging.getLogger('Meson')
def get_logger(self):
return self.logger
def call(self, args, show=False) -> str:
child = subprocess.Popen([self.path] + args, stdout=subprocess.PIPE)
fulloutput = b''
while True:
output = child.stdout.readline()
if output == b'' and child.poll() is not None:
break
if output:
if show:
print(output.decode("utf-8"), end='')
fulloutput += output
fulloutput = fulloutput.decode("utf-8")
if child.poll() != 0:
raise RuntimeError(fulloutput)
return fulloutput
def set_backend(self, backend):
if backend == 'ninja':
self.backend = NinjaBackend(self)
else:
raise RuntimeError('Backend not supported: ' + backend)
def setup(self):
if not self.backend:
raise RuntimeError('Build is not initilized')
if self.backend.setup():
return
meson_file = os.path.join(self.source_dir, 'meson.build')
if not os.path.exists(meson_file):
raise RuntimeError('No meson.build in source directory!')
self.call(['setup'] + self.get_options() + [self.source_dir, self.build_dir], True)
def build(self, target):
return self.backend.build(target)
def get_version(self):
if not self.c_version:
self.c_version = list(map(int, self.call(['--version']).split('.')))
return self.c_version
def get_project_name(self):
if not self.c_project_name:
if self.get_version()[1] >= 49:
attr = 'descriptive_name'
else:
attr = 'name'
self.c_project_name = self.get_project_info()[attr]
return self.c_project_name
def get_targets(self):
if not self.c_targets:
output = self.call(['introspect', '--targets', self.build_dir])
self.logger.debug('(targets) "%s"' % output)
self.c_targets = json.loads(output)
target_names = [ i ['name'] for i in self.c_targets]
self.logger.info('(targets) {}'.format(target_names))
return self.c_targets
def get_target_files(self, target):
id = target['id']
if id == 'all' or target['type'] in ('run', 'custom'):
return []
if id in self.c_target_files:
return self.c_target_files[id]
# Handle the new targets API of 0.50.0
if 'target_sources' in target:
self.c_target_files[id] = []
for i in target['target_sources']:
self.c_target_files[id] += i['sources']
self.c_target_files[id] += i['generated_sources']
return self.c_target_files[id]
# Handle meson versions before 0.50.0
self.logger.debug('(target) "%s"' % id)
output = self.call(['introspect', '--target-files', id, self.build_dir])
self.logger.debug('(target files) "%s"' % output)
# Workaround https://github.com/mesonbuild/meson/issues/2783
if output == '':
return []
self.c_target_files[id] = json.loads(output)
return self.c_target_files[id]
def get_buildsystem_files(self):
if not self.c_buildsystem_files:
output = self.call(['introspect', '--buildsystem-files', self.build_dir])
self.logger.debug('(buildsystem files) "%s"' % output)
self.c_buildsystem_files = json.loads(output)
return self.c_buildsystem_files
def get_project_info(self):
if not self.c_project_info:
output = self.call(['introspect', '--projectinfo', self.build_dir])
self.logger.info('(project info) "%s"' % output)
self.c_project_info = json.loads(output)
return self.c_project_info
def get_compile_commands(self, target):
id = target['id']
if id not in self.c_compile_commands_target:
if not self.c_compile_commands:
compile_commands_file = os.path.join(self.build_dir, 'compile_commands.json')
if not os.path.exists(compile_commands_file):
Exception('No compile_commands.json in build dir')
json_data = open(compile_commands_file).read()
self.c_compile_commands = json.loads(json_data)
# Only way to identify target compiler commands from compile_commands.json
# is by using a file from the wanted target
if len(self.get_target_files(target)) == 0:
return []
target_file = os.path.relpath(os.path.join(self.source_dir, self.get_target_files(target)[0]), self.build_dir)
self.c_compile_commands_target[id] = next((cmd for cmd in self.c_compile_commands if cmd['file'] == target_file), None)
return self.c_compile_commands_target[id]
def get_compiler(self, target=None):
if not target:
target = self.get_targets()[0]
compile_commands = self.get_compile_commands(target)
if not compile_commands:
return ''
return compile_commands['command'].split()[0]
def get_flags(self, target):
compile_commands = self.get_compile_commands(target)
if not compile_commands:
return []
args = compile_commands['command'].split()[1:]
return [arg for arg in args if not arg.startswith(('-D', '-I'))]
def get_defines(self, target):
compile_commands = self.get_compile_commands(target)
if not compile_commands:
return []
args = compile_commands['command'].split()
return [arg for arg in args if arg.startswith('-D')]
def get_include_directories(self, target=None, def_inc=True):
if not target:
target = self.get_targets()[0]
compile_commands = self.get_compile_commands(target)
if not compile_commands:
return []
if def_inc:
def_inc_dirs = self.get_default_include_directories(target)
else:
def_inc_dirs = []
args = compile_commands['command'].split()
return [os.path.abspath(os.path.join(self.build_dir, arg[2:])) for arg in args if
arg.startswith('-I')] + def_inc_dirs
def get_default_include_directories(self, target=None):
    """Return the compiler's built-in include search directories.

    Parses the stderr of ``<compiler> -x<lang> -E -v -`` between the
    '#include <...> search starts here:' and 'End of search list.' markers.
    Results are cached per language ('c' or 'c++'). Returns [] when no
    compiler is known.
    """
    compiler = self.get_compiler(target)
    if not compiler:
        return []
    lang = 'c++' if compiler.endswith('++') else 'c'
    if lang not in self.c_default_inc_dirs:
        proc = subprocess.Popen([compiler, '-x' + lang, '-E', '-v', '-'],
                                stdin=subprocess.DEVNULL,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.PIPE)
        stderr = proc.stderr.read().decode()
        lines = stderr.split('\n')
        try:
            begin = lines.index('#include <...> search starts here:') + 1
        except ValueError:
            begin = len(lines)  # marker absent: no directories reported
        paths = []
        for line in lines[begin:]:
            if line == 'End of search list.':
                break
            # Each path line starts with a single space; strip it.
            paths.append(os.path.abspath(line[1:]))
        self.c_default_inc_dirs[lang] = paths
    return self.c_default_inc_dirs[lang]
def get_output(self, target):
    """Return the absolute path of *target*'s build output file."""
    filename = self.get_target_filename(target)
    return os.path.join(self.build_dir, filename)
def get_target_filename(self, target):
    """Return *target*'s output filename.

    Newer meson versions report 'filename' as a list; in that case the first
    entry is returned (logging a debug note when several exist).
    """
    filename = target['filename']
    if not isinstance(filename, list):
        return filename
    if len(filename) > 1:
        self.logger.debug('Target {} has more than 1 filename {}'.format(target['name'], filename))
    return filename[0]
def get_target_filename_default(self):
    """Return the default 'filename' value: a one-element list for meson
    minor version >= 50, a plain empty string for older versions."""
    minor = self.get_version()[1]
    return [''] if minor >= 50 else ''
def get_options(self):
    """Return extra meson command-line options (currently only the
    --cross-file argument, when a cross file is configured)."""
    options = []
    if self.cross_file:
        options.extend(['--cross-file', self.cross_file])
    return options
| 8,245 | 0 | 594 |
d25b9f95cf20672879daadcfb806528e276ccb89 | 377 | py | Python | intro-cs/py4e/exer5-1.py | zdmgg/OSSU-compsci | 2095d80c80224b1994f84030cc14da3af716f489 | [
"MIT"
] | null | null | null | intro-cs/py4e/exer5-1.py | zdmgg/OSSU-compsci | 2095d80c80224b1994f84030cc14da3af716f489 | [
"MIT"
] | null | null | null | intro-cs/py4e/exer5-1.py | zdmgg/OSSU-compsci | 2095d80c80224b1994f84030cc14da3af716f489 | [
"MIT"
] | null | null | null | score = None
# Read numbers until the user types 'done', then print total, count, average.
score = None
count = 0
total = 0
average = 0
while score != 'done':
    try:
        number = input('Enter a number: ')
        if number == 'done':
            break
        number = float(number)
        total = total + number
        count = count + 1
        average = total / count
    except ValueError:
        # Only a failed float() conversion is "invalid input"; the original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit/EOFError.
        print('Invalid input')
print(total, count, average)
| 19.842105 | 43 | 0.509284 | score = None
# Read numbers until the user types 'done', then print total, count, average.
score = None
count = 0
total = 0
average = 0
while score != 'done':
    try:
        number = input('Enter a number: ')
        if number == 'done':
            break
        number = float(number)
        total = total + number
        count = count + 1
        average = total / count
    except ValueError:
        # Only a failed float() conversion is "invalid input"; the original
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit/EOFError.
        print('Invalid input')
print(total, count, average)
| 0 | 0 | 0 |
ce05583c8c4cd3f7b5f82b7c66008a0a2127008a | 6,537 | py | Python | sdk/python/pulumi_alicloud/imp/outputs.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 42 | 2019-03-18T06:34:37.000Z | 2022-03-24T07:08:57.000Z | sdk/python/pulumi_alicloud/imp/outputs.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 152 | 2019-04-15T21:03:44.000Z | 2022-03-29T18:00:57.000Z | sdk/python/pulumi_alicloud/imp/outputs.py | pulumi/pulumi-alicloud | 9c34d84b4588a7c885c6bec1f03b5016e5a41683 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-08-26T17:30:07.000Z | 2021-07-05T01:37:45.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'AppTemplateConfigList',
'GetAppTemplatesTemplateResult',
'GetAppTemplatesTemplateConfigListResult',
]
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
| 30.124424 | 105 | 0.602264 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'AppTemplateConfigList',
'GetAppTemplatesTemplateResult',
'GetAppTemplatesTemplateConfigListResult',
]
@pulumi.output_type
class AppTemplateConfigList(dict):
    # Generated output type (tfgen, see file header): one key/value
    # configuration item of an app template. Do not edit by hand.
    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        """
        :param str key: Configuration item key. Valid values:
        :param str value: Configuration item content.
        """
        # pulumi.set stores attributes through the output-type machinery
        # instead of plain attribute assignment.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """
        Configuration item key. Valid values:
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        Configuration item content.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class GetAppTemplatesTemplateResult(dict):
    # Generated output type (tfgen, see file header): one template record
    # returned by the getAppTemplates data source. Do not edit by hand.
    def __init__(__self__, *,
                 app_template_creator: str,
                 app_template_id: str,
                 app_template_name: str,
                 component_lists: Sequence[str],
                 config_lists: Sequence['outputs.GetAppTemplatesTemplateConfigListResult'],
                 create_time: str,
                 id: str,
                 integration_mode: str,
                 scene: str,
                 sdk_info: str,
                 standard_room_info: str,
                 status: str):
        """
        :param str app_template_creator: Apply template creator.
        :param str app_template_id: The first ID of the resource.
        :param str app_template_name: The name of the resource.
        :param Sequence[str] component_lists: List of components.
        :param Sequence['GetAppTemplatesTemplateConfigListArgs'] config_lists: List of config.
        :param str create_time: Creation time.
        :param str id: The ID of the App Template.
        :param str integration_mode: Integration mode (Integrated SDK:paasSDK, Model Room: standardRoom).
        :param str scene: Application Template scenario, e-commerce business, classroom classroom.
        :param str sdk_info: SDK information.
        :param str standard_room_info: Model room information.
        :param str status: Application template usage status.
        """
        # All fields are required; store them via the output-type machinery.
        pulumi.set(__self__, "app_template_creator", app_template_creator)
        pulumi.set(__self__, "app_template_id", app_template_id)
        pulumi.set(__self__, "app_template_name", app_template_name)
        pulumi.set(__self__, "component_lists", component_lists)
        pulumi.set(__self__, "config_lists", config_lists)
        pulumi.set(__self__, "create_time", create_time)
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "integration_mode", integration_mode)
        pulumi.set(__self__, "scene", scene)
        pulumi.set(__self__, "sdk_info", sdk_info)
        pulumi.set(__self__, "standard_room_info", standard_room_info)
        pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="appTemplateCreator")
    def app_template_creator(self) -> str:
        """
        Apply template creator.
        """
        return pulumi.get(self, "app_template_creator")

    @property
    @pulumi.getter(name="appTemplateId")
    def app_template_id(self) -> str:
        """
        The first ID of the resource.
        """
        return pulumi.get(self, "app_template_id")

    @property
    @pulumi.getter(name="appTemplateName")
    def app_template_name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "app_template_name")

    @property
    @pulumi.getter(name="componentLists")
    def component_lists(self) -> Sequence[str]:
        """
        List of components.
        """
        return pulumi.get(self, "component_lists")

    @property
    @pulumi.getter(name="configLists")
    def config_lists(self) -> Sequence['outputs.GetAppTemplatesTemplateConfigListResult']:
        """
        List of config.
        """
        return pulumi.get(self, "config_lists")

    @property
    @pulumi.getter(name="createTime")
    def create_time(self) -> str:
        """
        Creation time.
        """
        return pulumi.get(self, "create_time")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the App Template.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="integrationMode")
    def integration_mode(self) -> str:
        """
        Integration mode (Integrated SDK:paasSDK, Model Room: standardRoom).
        """
        return pulumi.get(self, "integration_mode")

    @property
    @pulumi.getter
    def scene(self) -> str:
        """
        Application Template scenario, e-commerce business, classroom classroom.
        """
        return pulumi.get(self, "scene")

    @property
    @pulumi.getter(name="sdkInfo")
    def sdk_info(self) -> str:
        """
        SDK information.
        """
        return pulumi.get(self, "sdk_info")

    @property
    @pulumi.getter(name="standardRoomInfo")
    def standard_room_info(self) -> str:
        """
        Model room information.
        """
        return pulumi.get(self, "standard_room_info")

    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Application template usage status.
        """
        return pulumi.get(self, "status")
@pulumi.output_type
class GetAppTemplatesTemplateConfigListResult(dict):
    # Generated output type (tfgen, see file header): one key/value config
    # entry nested inside a GetAppTemplatesTemplateResult. Do not edit by hand.
    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        :param str key: Config key.
        :param str value: Config Value.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        Config key.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        Config Value.
        """
        return pulumi.get(self, "value")
| 0 | 5,922 | 66 |
6591f7090270ba134f7b7018b0f663d5ae42d2df | 662 | py | Python | CalibMuon/DTCalibration/python/dtCalibValidation_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CalibMuon/DTCalibration/python/dtCalibValidation_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | CalibMuon/DTCalibration/python/dtCalibValidation_cfi.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# EDAnalyzer configuration for DT calibration validation: compares
# reconstructed 1D hits with 2D/4D segments and writes residual histograms.
dtCalibValidation = cms.EDAnalyzer("DTCalibValidation",
    # Write the histos on file
    OutputMEsInRootFile = cms.bool(False),
    # Label to retrieve 2D segments from the event
    segment2DLabel = cms.untracked.string('dt2DSegments'),
    OutputFileName = cms.string('residuals.root'),
    # Label to retrieve 4D segments from the event
    segment4DLabel = cms.untracked.string('dt4DSegments'),
    debug = cms.untracked.bool(False),
    # Label to retrieve RecHits from the event
    recHits1DLabel = cms.untracked.string('dt1DRecHits'),
    # Detailed analysis
    detailedAnalysis = cms.untracked.bool(False)
)
| 38.941176 | 58 | 0.73565 | import FWCore.ParameterSet.Config as cms
# EDAnalyzer configuration for DT calibration validation: compares
# reconstructed 1D hits with 2D/4D segments and writes residual histograms.
dtCalibValidation = cms.EDAnalyzer("DTCalibValidation",
    # Write the histos on file
    OutputMEsInRootFile = cms.bool(False),
    # Label to retrieve 2D segments from the event
    segment2DLabel = cms.untracked.string('dt2DSegments'),
    OutputFileName = cms.string('residuals.root'),
    # Label to retrieve 4D segments from the event
    segment4DLabel = cms.untracked.string('dt4DSegments'),
    debug = cms.untracked.bool(False),
    # Label to retrieve RecHits from the event
    recHits1DLabel = cms.untracked.string('dt1DRecHits'),
    # Detailed analysis
    detailedAnalysis = cms.untracked.bool(False)
)
| 0 | 0 | 0 |
e1fbe5a597ea04720ab3fd872e65cdcabcfb3968 | 296 | py | Python | pets.py | cmotek/python_crashcourse | 29cbdd6699cd17192bb599d235852d547630d110 | [
"Apache-2.0"
] | null | null | null | pets.py | cmotek/python_crashcourse | 29cbdd6699cd17192bb599d235852d547630d110 | [
"Apache-2.0"
] | null | null | null | pets.py | cmotek/python_crashcourse | 29cbdd6699cd17192bb599d235852d547630d110 | [
"Apache-2.0"
] | null | null | null | pets = {
# Map each pet's name to its kind and owner.
pets = {
    'Felix': {
        'kind': 'cat',
        'owner': 'Milton',
    },
    'Nelly': {
        'kind': 'dog',
        'owner': 'Stilton',
    },
    'Maurice': {
        'kind': 'parrot',
        'owner': 'Tyra',
    },
}

# Print one summary line per pet.
for pet, pet_details in pets.items():
    print(f"\n {pet_details['owner']}'s {pet_details['kind']}'s name is {pet}!")
print(f"\n {pet_details['owner']}'s {pet_details['kind']}'s name is {pet}!") | 14.8 | 77 | 0.52027 | pets = {
# Map each pet's name to its kind and owner.
pets = {
    'Felix': {
        'kind': 'cat',
        'owner': 'Milton',
    },
    'Nelly': {
        'kind': 'dog',
        'owner': 'Stilton',
    },
    'Maurice': {
        'kind': 'parrot',
        'owner': 'Tyra',
    },
}

# Print one summary line per pet.
for pet, pet_details in pets.items():
    print(f"\n {pet_details['owner']}'s {pet_details['kind']}'s name is {pet}!")
39b03ba2647352737ac426866ae084f541734d40 | 10,030 | py | Python | tests/test_counting.py | MarcoMernberger/counting_sequences | 308c0edb0e869d46bad5bf4324f421ddecc326ad | [
"MIT"
] | null | null | null | tests/test_counting.py | MarcoMernberger/counting_sequences | 308c0edb0e869d46bad5bf4324f421ddecc326ad | [
"MIT"
] | null | null | null | tests/test_counting.py | MarcoMernberger/counting_sequences | 308c0edb0e869d46bad5bf4324f421ddecc326ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from counting_sequences import SequenceCounter, CutadaptMatch
import pypipegraph as ppg
import pandas as pd
import pytest
import collections
__author__ = "MarcoMernberger"
__copyright__ = "MarcoMernberger"
__license__ = "mit"
@pytest.fixture
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
@pytest.mark.usefixtures("new_pipegraph_no_qc")
| 41.791667 | 315 | 0.727119 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from counting_sequences import SequenceCounter, CutadaptMatch
import pypipegraph as ppg
import pandas as pd
import pytest
import collections
__author__ = "MarcoMernberger"
__copyright__ = "MarcoMernberger"
__license__ = "mit"
@pytest.fixture
def scouter(tmpdir):
    """SequenceCounter over data/seq_in.csv, trimming at 'CCTCTT' to 150 bp.

    Results go to a per-test temporary folder.
    """
    # NOTE(review): ("CCTCTT") is a plain string, not a one-element tuple —
    # confirm the seqs_to_trim_* parameters are meant to receive a string.
    scouter = SequenceCounter(
        sequence_file_path=Path(__file__).parent.parent / "data" / "seq_in.csv",
        name=None,
        seqs_to_trim_reads=("CCTCTT"),
        seqs_to_trim_predefined=("CCTCTT"),
        trimmed_length=150,
        result_folder=Path(tmpdir) / "test",
    )
    return scouter
def test_init(scouter, tmpdir):
    """Constructor arguments of the fixture are reflected on the instance,
    including the derived name and start-sequence length."""
    assert scouter.name == "SC_CCTCTT_150"
    assert scouter.start_seq_to_trim == "CCTCTT"
    assert scouter.start_seq_len == 6
    assert scouter.trimmed_length == 150
    assert scouter.result_dir.exists()
    assert scouter.sequence_file_path.name == "seq_in.csv"
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_write_trim_predefines(tmpdir, scouter):
    """write_predefined_sequences emits predefined_sequences.tsv with
    correct duplicate/deduplication bookkeeping."""
    scouter.write_predefined_sequences()
    outputfile = scouter.result_dir / "predefined_sequences.tsv"
    ppg.run_pipegraph()
    df = pd.read_csv(outputfile, sep="\t")
    # Re-validate the written table against the counter's own invariants.
    scouter.assert_predefined(df["Full Sequence"].values, df["Sequence"].values)
    assert outputfile.exists()
    df_new = pd.read_csv(outputfile, sep="\t")
    df_new.index = df_new["Name"]
    print(df.head())
    # test3/test4 share one sequence: both flagged as duplicates, only the
    # first survives deduplication, and all duplicate names are listed.
    assert df_new.loc["1>A_test3"]["Duplicate"]
    assert df_new.loc["1>A_test4"]["Duplicate"]
    assert df_new.loc["1>A_test3"]["Deduplicated"]
    assert not df_new.loc["1>A_test4"]["Deduplicated"]
    assert df_new.loc["1>A_test3"]["Duplicate Entries"] == "1>A_test3;1>A_test4"
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_write_trim_predefine_alert1(tmpdir, scouter):
    """assert_predefined must raise on the malformed fixture seq_fail1.csv."""
    # NOTE(review): the `scouter` fixture argument is immediately shadowed by
    # a locally built counter; the fixture only contributes its setup effects.
    scouter = SequenceCounter(
        sequence_file_path=Path(__file__).parent.parent / "data" / "seq_fail1.csv",
        name=None,
        start_seq_to_trim="TTGCTTTACCTCCTTTT",
        trimmed_length=133,
        result_folder=Path(tmpdir) / "test",
    )
    df_sequence_df = pd.read_csv(scouter.sequence_file_path, sep="\t")
    # Entry names follow the counter's "<alteration>_<effect>" convention.
    df_sequence_df["Name"] = [
        f"{a}_{b}"
        for a, b in zip(
            df_sequence_df["Alteration"].astype(str),
            df_sequence_df["Effect"].astype(str),
        )
    ]
    index_function = scouter._find_index
    df_sequence_df = df_sequence_df.rename(columns={"Sequence": "Full Sequence"})
    sequences = []
    seen = collections.defaultdict(set)
    # Trim each full sequence the way the counter does: start at the anchor
    # found by _find_index, keep at most trimmed_length characters.
    for _, row in df_sequence_df.iterrows():
        fullseq = row["Full Sequence"]
        seen[fullseq].add(row["Name"])
        index1 = index_function(fullseq)
        index2 = index1 + min(len(fullseq), scouter.trimmed_length)
        seq = fullseq[index1:index2]
        sequences.append(seq)
    df_sequence_df["Sequence"] = sequences
    duplicates = []
    dup_names = []
    # Flag entries whose full sequence occurs more than once.
    for seq in df_sequence_df["Full Sequence"].values:
        dups = seen[seq]
        duplicates.append(len(dups) > 1)
        dup_names.append(",".join(list(dups)))
    df_sequence_df["Duplicate"] = duplicates
    df_sequence_df["Duplicate Entries"] = dup_names
    with pytest.raises(Exception):
        scouter.assert_predefined(
            df_sequence_df["Full Sequence"].values, df_sequence_df["Sequence"].values
        )
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_write_trim_predefine_alert2(tmpdir, scouter):
    """assert_predefined must raise on the malformed fixture seq_fail2.csv.

    Same procedure as test_write_trim_predefine_alert1, different input file.
    """
    # NOTE(review): the `scouter` fixture argument is immediately shadowed by
    # a locally built counter; the fixture only contributes its setup effects.
    scouter = SequenceCounter(
        sequence_file_path=Path(__file__).parent.parent / "data" / "seq_fail2.csv",
        name=None,
        start_seq_to_trim="TTGCTTTACCTCCTTTT",
        trimmed_length=133,
        result_folder=Path(tmpdir) / "test",
    )
    df_sequence_df = pd.read_csv(scouter.sequence_file_path, sep="\t")
    # Entry names follow the counter's "<alteration>_<effect>" convention.
    df_sequence_df["Name"] = [
        f"{a}_{b}"
        for a, b in zip(
            df_sequence_df["Alteration"].astype(str),
            df_sequence_df["Effect"].astype(str),
        )
    ]
    index_function = scouter._find_index
    df_sequence_df = df_sequence_df.rename(columns={"Sequence": "Full Sequence"})
    sequences = []
    seen = collections.defaultdict(set)
    # Trim each full sequence the way the counter does: start at the anchor
    # found by _find_index, keep at most trimmed_length characters.
    for _, row in df_sequence_df.iterrows():
        fullseq = row["Full Sequence"]
        seen[fullseq].add(row["Name"])
        index1 = index_function(fullseq)
        index2 = index1 + min(len(fullseq), scouter.trimmed_length)
        seq = fullseq[index1:index2]
        sequences.append(seq)
    df_sequence_df["Sequence"] = sequences
    duplicates = []
    dup_names = []
    # Flag entries whose full sequence occurs more than once.
    for seq in df_sequence_df["Full Sequence"].values:
        dups = seen[seq]
        duplicates.append(len(dups) > 1)
        dup_names.append(",".join(list(dups)))
    df_sequence_df["Duplicate"] = duplicates
    df_sequence_df["Duplicate Entries"] = dup_names
    with pytest.raises(Exception):
        scouter.assert_predefined(
            df_sequence_df["Full Sequence"].values, df_sequence_df["Sequence"].values
        )
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_write_fastq(tmpdir, raw_lane, scouter):
    """write_fastq_count produces the per-lane all_reads table."""
    scouter.write_fastq_count(raw_lane)
    ppg.run_pipegraph()
    output_file = scouter.result_dir / f"{raw_lane.name}_{scouter.name}_all_reads.tsv"
    assert output_file.exists()
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_write_fastq_trimmed(tmpdir, raw_lane, scouter):
    """write_fastq_count_trimmed produces both the raw and the trimmed
    per-lane read tables."""
    scouter.write_fastq_count_trimmed(raw_lane)
    ppg.run_pipegraph()
    output_file = scouter.result_dir / f"{raw_lane.name}_{scouter.name}_all_reads.tsv"
    output_file2 = (
        scouter.result_dir / f"{raw_lane.name}_{scouter.name}_all_reads_trimmed.tsv"
    )
    assert output_file.exists()
    assert output_file2.exists()
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_write_count_table(tmpdir, raw_lane, scouter):
    """write_count_table writes matched and unmatched sequence count tables."""
    scouter.write_count_table(raw_lane, row_order=None)
    ppg.run_pipegraph()
    output_file = (
        scouter.result_dir / f"{raw_lane.name}_{scouter.name}_sequence_count.tsv"
    )
    output_file2 = (
        scouter.result_dir
        / f"{raw_lane.name}_{scouter.name}_sequence_count_unmatched.tsv"
    )
    assert output_file.exists()
    assert output_file2.exists()
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_count_fastq(tmpdir, raw_lane, scouter):
    """Raw read counting: every sequence in the all_reads table carries the
    expected occurrence count for the raw_lane fixture data."""
    scouter.write_fastq_count(raw_lane)
    ppg.run_pipegraph()
    output_file = scouter.result_dir / f"{raw_lane.name}_{scouter.name}_all_reads.tsv"
    df = pd.read_csv(output_file, sep="\t")
    # Expected counts per untrimmed read sequence.
    expected = {
        "CGTACAAGAGACAAGCAATCAGTGAGGAATCAGAGGCCTCCGGACCCTGGGCAACCAGCCCTGTCGTCTCTCCAGCCCCAGCTGCTCACCATCGCTATCTGAGCGCCACTCTTGTTGGGGCCAGCGCCTCCCACCTTCCCTCTTTTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGCACTCTTGT": 2,
        "CGTACAAGAGACAAGCAATCAGTGAGGAATCCCTCTTTTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGC": 1,
        "CGTACAAGAGACAAGCAATCAGTGAGGAATCCCTCTTTTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGCACACACACA": 1,
        "CGTACAAGAGACAAGCAATCAGTGAGGAATCCCTCTTTTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTG": 1,
        "AGGAATCCCTCTTTTGCTTTACCTCCTTTTAGCCTCTTTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGCAACCA": 2,
        "AGGAATCGCTTTACCTCCTTTTAGTTGAAATTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGCAACCA": 1,
    }
    for _, row in df.iterrows():
        assert expected[row["Sequence"]] == row["Count"]
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_count_fastq_trimmed(tmpdir, raw_lane, scouter):
    """Trimmed read counting: sequences in the trimmed table start at the
    trim anchor and carry the expected occurrence counts."""
    scouter.write_fastq_count_trimmed(raw_lane)
    ppg.run_pipegraph()
    output_file = (
        scouter.result_dir / f"{raw_lane.name}_{scouter.name}_all_reads_trimmed.tsv"
    )
    df = pd.read_csv(output_file, sep="\t")
    # Expected counts per trimmed read sequence.
    expected = {
        "TTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGCA": 2,
        "TTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGC": 2,
        "TTGCTTTACCTCCTTTTAGTTGGCCTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTG": 1,
        "TTGCTTTACCTCCTTTTAGCCTCTTTTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGTCTGCA": 2,
        "AGGAATCGCTTTACCTCCTTTTAGTTGAAATTGCCCCGGCCCCGGTCCCTTGCCAAAATGTCTTGTTTAGCCCCGGGTGCTCCTGTCGGGTCTTGACTGATTCACACTTGATATTCTTGTCTTCTGGTTCTTGCTCTGATGAGCACACGT": 1,
    }
    for _, row in df.iterrows():
        assert expected[row["Sequence"]] == row["Count"]
@pytest.mark.usefixtures("new_pipegraph_no_qc")
def test_count_samples_fast(tmpdir, raw_lane, scouter):
    """Per-predefined-sequence counting: each named entry from seq_in.csv
    receives the expected read count (0 for the unmatched entry)."""
    scouter.write_count_table(raw_lane, row_order=None)
    ppg.run_pipegraph()
    output_file = (
        scouter.result_dir / f"{raw_lane.name}_{scouter.name}_sequence_count.tsv"
    )
    df = pd.read_csv(output_file, sep="\t")
    df.index = df.Name
    df_predefined = pd.read_csv(scouter.sequence_file_path, sep="\t")
    # Rebuild the "<alteration>_<effect>" names used as row keys.
    df_predefined["Name"] = [
        f"{a}_{b}"
        for a, b in zip(
            df_predefined["Alteration"].astype(str),
            df_predefined["Effect"].astype(str),
        )
    ]
    expected = {
        "1>A_test1": 2,
        "1>A_test2": 2,
        "1>A_test3": 2,
        "1>A_test4": 2,
        "1>A_test5": 0,
    }
    for _, row in df_predefined.iterrows():
        assert df.loc[row["Name"]]["Read Count"] == expected[row["Name"]]
| 9,020 | 0 | 243 |
e3bef5391b2e38b629afaa1fb33093dce383a64b | 1,329 | py | Python | main.py | shyrwinsia/streamlit-stock-predictor | 21736e78a1ba674b02b96136c66bc443d3ec7e8e | [
"MIT"
] | 1 | 2021-03-04T23:43:19.000Z | 2021-03-04T23:43:19.000Z | main.py | shyrwinsia/streamlit-stock-predictor | 21736e78a1ba674b02b96136c66bc443d3ec7e8e | [
"MIT"
] | null | null | null | main.py | shyrwinsia/streamlit-stock-predictor | 21736e78a1ba674b02b96136c66bc443d3ec7e8e | [
"MIT"
] | 1 | 2021-03-24T09:14:24.000Z | 2021-03-24T09:14:24.000Z | from datetime import date
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
import streamlit as st
import yfinance as yf
START = "2010-01-01"
TODAY = date.today().strftime("%Y-%m-%d")
st.title("Stock Prediction App")
st.header('Parameters')
stocks = ("AAPL", "GOOG", "MSFT")
selected_stock = st.selectbox(
"Select dataset for prediction",
stocks
)
@st.cache
st.header('Stock Data')
data = load_data(selected_stock)
st.subheader("Raw data")
st.write(data.tail())
plot_raw_data()
n_years = st.slider("Years to predict:", 1, 5)
period = n_years * 365
df_train = data[['Date', 'Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})
m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)
st.subheader("Forecasted data")
st.write(forecast.tail())
st.write("Forecasted Time Series")
fig1 = plot_plotly(m, forecast, trend=True)
st.plotly_chart(fig1)
| 23.732143 | 84 | 0.720843 | from datetime import date
from fbprophet import Prophet
from fbprophet.plot import plot_plotly
from plotly import graph_objs as go
import streamlit as st
import yfinance as yf
# Fixed history window: from 2010-01-01 up to the day the app is run.
START = "2010-01-01"
TODAY = date.today().strftime("%Y-%m-%d")

st.title("Stock Prediction App")
st.header('Parameters')
# Tickers offered in the dropdown.
stocks = ("AAPL", "GOOG", "MSFT")
selected_stock = st.selectbox(
    "Select dataset for prediction",
    stocks
)
@st.cache
def load_data(ticker):
    """Download daily price history for *ticker* (START .. TODAY) and return
    it with 'Date' as an ordinary column instead of the index."""
    history = yf.download(ticker, START, TODAY)
    history.reset_index(inplace=True)
    return history
st.header('Stock Data')
# Download (cached) history for the chosen ticker and preview the tail.
data = load_data(selected_stock)
st.subheader("Raw data")
st.write(data.tail())
def plot_raw_data():
    """Plot the closing price of the loaded data as an interactive
    time series with a range slider."""
    fig = go.Figure()
    # go.Line is a deprecated alias (it names the scatter *line style*
    # object); the supported spelling is an explicit lines-mode Scatter trace.
    fig.add_trace(go.Scatter(x=data['Date'], y=data['Close'], mode='lines', name='Price'))
    fig.layout.update(title_text="Time Series Data", xaxis_rangeslider_visible=True)
    st.plotly_chart(fig)
plot_raw_data()

# Forecast horizon in days, chosen in whole years.
n_years = st.slider("Years to predict:", 1, 5)
period = n_years * 365

# Prophet expects the columns to be named 'ds' (date) and 'y' (value).
df_train = data[['Date', 'Close']]
df_train = df_train.rename(columns={"Date": "ds", "Close": "y"})

m = Prophet()
m.fit(df_train)
future = m.make_future_dataframe(periods=period)
forecast = m.predict(future)

st.subheader("Forecasted data")
st.write(forecast.tail())

st.write("Forecasted Time Series")
fig1 = plot_plotly(m, forecast, trend=True)
st.plotly_chart(fig1)
| 295 | 0 | 45 |
faf47f83fcdf9f06684e52d0191979f327a76e82 | 236 | py | Python | SCRAPE/Lib/site-packages/twisted/plugins/twisted_socks.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 4,612 | 2015-01-01T12:57:23.000Z | 2022-03-30T01:08:23.000Z | SCRAPE/Lib/site-packages/twisted/plugins/twisted_socks.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 1,243 | 2015-01-23T17:23:59.000Z | 2022-03-28T13:46:17.000Z | SCRAPE/Lib/site-packages/twisted/plugins/twisted_socks.py | Chinmoy-Prasad-Dutta/scrapy_scraper | 09f6abfc3bcf10ee28f486d83b450c89a07e066e | [
"MIT"
] | 1,236 | 2015-01-13T14:41:26.000Z | 2022-03-17T07:12:36.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
# Plugin descriptor: registers the SOCKSv4 proxy service from
# twisted.tap.socks under the tap name "socks".
TwistedSOCKS = ServiceMaker(
    "Twisted SOCKS", "twisted.tap.socks", "A SOCKSv4 proxy service.", "socks"
)
| 26.222222 | 77 | 0.754237 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.application.service import ServiceMaker
# Plugin descriptor: registers the SOCKSv4 proxy service from
# twisted.tap.socks under the tap name "socks".
TwistedSOCKS = ServiceMaker(
    "Twisted SOCKS", "twisted.tap.socks", "A SOCKSv4 proxy service.", "socks"
)
| 0 | 0 | 0 |
4a9d33ca5730d7016f2b8c3ae368a8f2fd87a9e7 | 2,976 | py | Python | 3-Notebooks/TTS/.ipynb_checkpoints/syllables_linguistic-checkpoint.py | iffishells/Pushto-TTS-FYP | 7ed3a180ba4c1e609ae5aa5e76bfd093a3d3d140 | [
"Apache-2.0"
] | 2 | 2021-12-06T04:28:18.000Z | 2021-12-20T03:33:00.000Z | 3-Notebooks/TTS-fyp-01/TTS/syllables_linguistic.py | iffishells/Pushto-TTS-FYP | 7ed3a180ba4c1e609ae5aa5e76bfd093a3d3d140 | [
"Apache-2.0"
] | null | null | null | 3-Notebooks/TTS-fyp-01/TTS/syllables_linguistic.py | iffishells/Pushto-TTS-FYP | 7ed3a180ba4c1e609ae5aa5e76bfd093a3d3d140 | [
"Apache-2.0"
] | 1 | 2021-12-29T16:44:59.000Z | 2021-12-29T16:44:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 23 17:32:31 2021
@author: ifti
"""
from ipapy import UNICODE_TO_IPA
from ipapy import is_valid_ipa
from ipapy.ipachar import IPAConsonant
from ipapy.ipachar import IPAVowel
from ipapy.ipastring import IPAString
from IPATranscription import IPA
import importlib
from importlib import reload
def IPA_of_token(token):
'''
IPA_of_token() is a linguistic function to find the IPA of Pasto letter
parameter : it take a token which actaully a single pashto word
return : it will return the IPA of given pashto word from the lexicon
'''
# iterate over the each token
#print("token : {}".format(token))
ipa = []
temp =""
for char in token:
#print("char : {} , {} ".format(char ,IPA(char)))
temp = str(IPA(char)).replace("[", "")
temp = temp.replace("]", "")
temp = temp.replace(",", "")
if temp =="ʔ":
print("dump")
f = open("Datasets/not_available_ipa.txt","+w" ,encoding='utf-8')
f.write(char)
f.close()
# print(temp,len(temp))
# if more then IPA then we will use first for the time being
ipa.append(temp)
#print(ipa)
return ipa
def is_valid_syllable(cv):
'''
is_valid_syllable() is helper function of linguistic part
parameter : it will syllables
return : it will return the string to tell you it is valid syllable or not.
'''
if cv in ["V","VC","CV","CVC","CCV","CVCC","CCVC","CCCV","CCCVC"]:
return "Valid syllables"
else:
return "Not Valid syllables"
def make_syllables(IPA_list):
'''
make_syllables() is the function of linguistic part of the program and
it will make the syllable of the given IPA
paramter : it takes the list of ipa of the token ,
return : it will return the syllables of the ipa
'''
#=============================================================================
#reverse_list = reversed(IPA_list)
ipa_str = ""
cv_Form = ""
for char_ipa in range(0,len(IPA_list)):
#print("ipa :",char_ipa)
if IPA_list[char_ipa] =="None":
continue
if IPA_list[char_ipa] in ['əi','ə','u','ɑ','ā','ai','a','i','o','u','e','əi','A','E','I','U','O' ]:
cv_Form+="V"
ipa_str += IPA_list[char_ipa]
else:
#print(char_ipa)
cv_Form+="C"
ipa_str += IPA_list[char_ipa] + " "
print(cv_Form)
print(is_valid_syllable(cv_Form))
return ipa_str
# =============================================================================
if __name__ == "__main__":
print()
print(make_syllables(IPA_of_token("اړنګمن"))) | 27.302752 | 108 | 0.523522 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 23 17:32:31 2021
@author: ifti
"""
from ipapy import UNICODE_TO_IPA
from ipapy import is_valid_ipa
from ipapy.ipachar import IPAConsonant
from ipapy.ipachar import IPAVowel
from ipapy.ipastring import IPAString
from IPATranscription import IPA
import importlib
from importlib import reload
def IPA_of_token(token):
'''
IPA_of_token() is a linguistic function to find the IPA of Pasto letter
parameter : it take a token which actaully a single pashto word
return : it will return the IPA of given pashto word from the lexicon
'''
# iterate over the each token
#print("token : {}".format(token))
ipa = []
temp =""
for char in token:
#print("char : {} , {} ".format(char ,IPA(char)))
temp = str(IPA(char)).replace("[", "")
temp = temp.replace("]", "")
temp = temp.replace(",", "")
if temp =="ʔ":
print("dump")
f = open("Datasets/not_available_ipa.txt","+w" ,encoding='utf-8')
f.write(char)
f.close()
# print(temp,len(temp))
# if more then IPA then we will use first for the time being
ipa.append(temp)
#print(ipa)
return ipa
def is_valid_syllable(cv):
'''
is_valid_syllable() is helper function of linguistic part
parameter : it will syllables
return : it will return the string to tell you it is valid syllable or not.
'''
if cv in ["V","VC","CV","CVC","CCV","CVCC","CCVC","CCCV","CCCVC"]:
return "Valid syllables"
else:
return "Not Valid syllables"
def make_syllables(IPA_list):
'''
make_syllables() is the function of linguistic part of the program and
it will make the syllable of the given IPA
paramter : it takes the list of ipa of the token ,
return : it will return the syllables of the ipa
'''
#=============================================================================
#reverse_list = reversed(IPA_list)
ipa_str = ""
cv_Form = ""
for char_ipa in range(0,len(IPA_list)):
#print("ipa :",char_ipa)
if IPA_list[char_ipa] =="None":
continue
if IPA_list[char_ipa] in ['əi','ə','u','ɑ','ā','ai','a','i','o','u','e','əi','A','E','I','U','O' ]:
cv_Form+="V"
ipa_str += IPA_list[char_ipa]
else:
#print(char_ipa)
cv_Form+="C"
ipa_str += IPA_list[char_ipa] + " "
print(cv_Form)
print(is_valid_syllable(cv_Form))
return ipa_str
# =============================================================================
if __name__ == "__main__":
print()
print(make_syllables(IPA_of_token("اړنګمن"))) | 0 | 0 | 0 |
b47f30e36580e1ea28076cc641ade41a5cb14601 | 511 | py | Python | 03-fibonacci/main.py | arfon/vscode-course-sample | bde5929f228953221b888afc192628b0cb1d9e65 | [
"MIT"
] | 24 | 2020-07-27T19:30:09.000Z | 2021-06-05T23:25:09.000Z | 03-fibonacci/main.py | microsoft/course-sample | bde5929f228953221b888afc192628b0cb1d9e65 | [
"MIT"
] | null | null | null | 03-fibonacci/main.py | microsoft/course-sample | bde5929f228953221b888afc192628b0cb1d9e65 | [
"MIT"
] | 18 | 2020-07-31T11:08:06.000Z | 2022-01-13T21:16:29.000Z | #recursive approach
numTerms = int(input("How many terms of Fibonacci sequence to print? "))
# What are the first few terms of the fib seq?
# 0 1 1 2 3
# main method
# check if the number of terms is valid
if numTerms <= 0:
print("Please enter a positive integer")
else:
print("Fibonacci sequence:")
for i in range(numTerms):
print(fibonacci(i))
| 23.227273 | 72 | 0.645793 | #recursive approach
# Ask how many Fibonacci terms to print.
numTerms = int(input("How many terms of Fibonacci sequence to print? "))
# What are the first few terms of the fib seq?
# 0 1 1 2 3
# main method
def fibonacci(n):
    """Return the n-th Fibonacci number (0, 1, 1, 2, 3, ...) recursively.

    The base case must cover both n == 0 and n == 1: the original `n < 1`
    test made fibonacci(1) recurse into fibonacci(0) + fibonacci(-1) and
    return wrong (negative) values for every n >= 1.
    """
    if n <= 1:
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)
# check if the number of terms is valid
if numTerms <= 0:
print("Please enter a positive integer")
else:
print("Fibonacci sequence:")
for i in range(numTerms):
print(fibonacci(i))
| 118 | 0 | 22 |
aaaeace4f720a637e9e3e0cf3bc7a967f8a6c3ed | 745 | py | Python | tests/test_pdf2images.py | zxytim/pdf2images | 60d861e74c539ee36072daa878189541670b8796 | [
"BSD-3-Clause"
] | 12 | 2019-07-19T17:04:05.000Z | 2020-11-13T01:17:05.000Z | tests/test_pdf2images.py | zxytim/pdf2images | 60d861e74c539ee36072daa878189541670b8796 | [
"BSD-3-Clause"
] | 4 | 2020-04-08T09:32:00.000Z | 2021-02-08T10:46:16.000Z | tests/test_pdf2images.py | zxytim/pdf2images | 60d861e74c539ee36072daa878189541670b8796 | [
"BSD-3-Clause"
] | 1 | 2020-03-28T17:08:39.000Z | 2020-03-28T17:08:39.000Z | import unittest
import pdf2images
import os
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
ASSETS_DIR = os.path.join(THIS_DIR, "assets")
| 29.8 | 81 | 0.651007 | import unittest
import pdf2images
import os
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
ASSETS_DIR = os.path.join(THIS_DIR, "assets")
class TestPDF2Images(unittest.TestCase):
    """Exercise pdf2images against the bundled sample PDF in ASSETS_DIR."""

    @classmethod
    def setUpClass(cls):
        # Read the sample document once, shared by every test in the class.
        cls.pdf_path = os.path.join(ASSETS_DIR, "Sequence Modeling 2019-04.pdf")
        with open(cls.pdf_path, "rb") as pdf_file:
            cls.pdf_data = pdf_file.read()

    def test_basics(self):
        # The sample document is known to contain 175 pages.
        self.assertEqual(pdf2images.get_num_pages_given_path(self.pdf_path), 175)
        requested_pages = [0, 1, 2, 4, 8, 16, 32, 174]
        thumbnails = pdf2images.pdf_data_to_thumbnails(
            self.pdf_data, requested_pages, 100, 200
        )
        # One non-empty thumbnail per requested page.
        self.assertEqual(len(thumbnails), 8)
        for image_bytes in thumbnails.values():
            self.assertGreater(len(image_bytes), 0)
| 486 | 89 | 23 |
d6d3068d635f415c4533ac4f92fe450f06e2af86 | 64 | py | Python | mylib/image/__init__.py | uwitty/imattribute | 20c2130362f7b2b717bcdbddc4893ea82d1dbac5 | [
"MIT"
] | null | null | null | mylib/image/__init__.py | uwitty/imattribute | 20c2130362f7b2b717bcdbddc4893ea82d1dbac5 | [
"MIT"
] | null | null | null | mylib/image/__init__.py | uwitty/imattribute | 20c2130362f7b2b717bcdbddc4893ea82d1dbac5 | [
"MIT"
] | null | null | null | import os
| 10.666667 | 36 | 0.703125 | import os
def rootdir():
    """Return the directory that contains this module."""
    module_dir = os.path.dirname(__file__)
    return module_dir
| 30 | 0 | 23 |
7dc1ecd380baa1211dc8aaccbd6b29a9018058eb | 18 | py | Python | shopify/version.py | twslade/shopify_python_api | eaf4684ae4da4e9cfe8fe459b76e70c1610a34b9 | [
"MIT"
] | 1 | 2020-04-17T21:23:34.000Z | 2020-04-17T21:23:34.000Z | shopify/version.py | twslade/shopify_python_api | eaf4684ae4da4e9cfe8fe459b76e70c1610a34b9 | [
"MIT"
] | null | null | null | shopify/version.py | twslade/shopify_python_api | eaf4684ae4da4e9cfe8fe459b76e70c1610a34b9 | [
"MIT"
] | 1 | 2020-10-03T19:48:39.000Z | 2020-10-03T19:48:39.000Z | VERSION = '8.0.0'
| 9 | 17 | 0.555556 | VERSION = '8.0.0'
| 0 | 0 | 0 |
50315219c6659e8a1910d6a0f8140aad18e3f3b0 | 21,601 | py | Python | pebbles/models.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | pebbles/models.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | pebbles/models.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z | import string
import random
from flask_bcrypt import Bcrypt
import names
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from sqlalchemy.ext.hybrid import hybrid_property, Comparator
from sqlalchemy.schema import MetaData
from sqlalchemy.orm import backref
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
import uuid
import json
import datetime
import six
from pebbles.utils import validate_ssh_pubkey, get_full_blueprint_config, get_blueprint_fields_from_config
MAX_PASSWORD_LENGTH = 100
MAX_EMAIL_LENGTH = 128
MAX_NAME_LENGTH = 128
MAX_VARIABLE_KEY_LENGTH = 512
MAX_VARIABLE_VALUE_LENGTH = 512
MAX_NOTIFICATION_SUBJECT_LENGTH = 255
db = SQLAlchemy()
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
db.Model.metadata = MetaData(naming_convention=convention)
bcrypt = Bcrypt()
NAME_ADJECTIVES = (
'happy',
'sad',
'bright',
'dark',
'blue',
'yellow',
'red',
'green',
'white',
'black',
'clever',
'witty',
'smiley',
)
group_banned_user = db.Table( # Secondary Table for many-to-many mapping
'groups_banned_users',
db.Column('group_id', db.String(32), db.ForeignKey('groups.id')),
db.Column('user_id', db.String(32), db.ForeignKey('users.id')), db.PrimaryKeyConstraint('group_id', 'user_id')
)
class NamespacedKeyValue(db.Model):
""" Stores key/value pair data, separated by namespaces
This model should be initialized by providing namespace and key as mandatory arguments.
It is highly recommended to have a schema for the JSON value field,
and provide it during model initialization.
"""
__tablename__ = 'namespaced_keyvalues'
namespace = db.Column(db.String(32), primary_key=True)
key = db.Column(db.String(128), primary_key=True)
_value = db.Column(db.Text)
_schema = db.Column(db.Text)
created_ts = db.Column(db.Float)
updated_ts = db.Column(db.Float)
@classmethod
def str_to_bool(cls, val):
""" Convert the string into boolean.
Useful when value comes from UI and becomes True even if False
By default, this function shall return False
"""
if val:
val = val.lower()
if val in ('true', u'true', '1'):
return True
return False
@hybrid_property
@schema.setter
@hybrid_property
@value.setter
| 31.626647 | 137 | 0.656729 | import string
import random
from flask_bcrypt import Bcrypt
import names
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from sqlalchemy.ext.hybrid import hybrid_property, Comparator
from sqlalchemy.schema import MetaData
from sqlalchemy.orm import backref
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
import uuid
import json
import datetime
import six
from pebbles.utils import validate_ssh_pubkey, get_full_blueprint_config, get_blueprint_fields_from_config
MAX_PASSWORD_LENGTH = 100
MAX_EMAIL_LENGTH = 128
MAX_NAME_LENGTH = 128
MAX_VARIABLE_KEY_LENGTH = 512
MAX_VARIABLE_VALUE_LENGTH = 512
MAX_NOTIFICATION_SUBJECT_LENGTH = 255
db = SQLAlchemy()
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
db.Model.metadata = MetaData(naming_convention=convention)
bcrypt = Bcrypt()
NAME_ADJECTIVES = (
'happy',
'sad',
'bright',
'dark',
'blue',
'yellow',
'red',
'green',
'white',
'black',
'clever',
'witty',
'smiley',
)
class CaseInsensitiveComparator(Comparator):
    """Hybrid-property comparator that lower-cases both sides of ``==``."""
    def __eq__(self, other):
        # Emit SQL with LOWER() applied to both the column and the operand.
        return func.lower(self.__clause_element__()) == func.lower(other)
def load_column(column):
    """Deserialize a JSON text column, returning {} for empty or invalid data.

    :param column: JSON string stored in the database (may be None or "").
    :return: the decoded object, or {} when the column is unset or malformed.
    """
    if not column:
        # Unset/empty column: nothing to decode.
        return {}
    try:
        return json.loads(column)
    except ValueError:
        # Malformed JSON (json.JSONDecodeError subclasses ValueError).
        # The original bare `except:` also hid unrelated programming errors.
        return {}
class User(db.Model):
    """User account record. Rows are soft-deleted (renamed and flagged) by
    delete() rather than removed, so unique eppn/email values can be reused."""
    __tablename__ = 'users'
    id = db.Column(db.String(32), primary_key=True)
    # eppn is mandatory and database objects are retrieved based on eppn
    _eppn = db.Column('eppn', db.String(MAX_EMAIL_LENGTH), unique=True)
    # email_id field is used only for sending emails.
    _email_id = db.Column('email_id', db.String(MAX_EMAIL_LENGTH))
    password = db.Column(db.String(MAX_PASSWORD_LENGTH))
    joining_date = db.Column(db.DateTime)
    expiry_date = db.Column(db.DateTime)
    is_admin = db.Column(db.Boolean, default=False)
    is_group_owner = db.Column(db.Boolean, default=False)
    is_active = db.Column(db.Boolean, default=False)
    is_deleted = db.Column(db.Boolean, default=False)
    is_blocked = db.Column(db.Boolean, default=False)
    credits_quota = db.Column(db.Float, default=1.0)
    latest_seen_notification_ts = db.Column(db.DateTime)
    group_quota = db.Column(db.Float)
    blueprint_quota = db.Column(db.Float)
    instances = db.relationship('Instance', backref='user', lazy='dynamic')
    activation_tokens = db.relationship('ActivationToken', backref='user', lazy='dynamic')
    groups = db.relationship("GroupUserAssociation", back_populates="user", lazy="dynamic")
    def __init__(self, eppn, password=None, is_admin=False, email_id=None, expiry_date=None):
        """Create a user; without a password the account remains inactive."""
        self.id = uuid.uuid4().hex
        self.eppn = eppn
        self.is_admin = is_admin
        self.joining_date = datetime.datetime.utcnow()
        self.expiry_date = expiry_date
        if email_id:
            self.email_id = email_id
        if password:
            self.set_password(password)
            self.is_active = True
        else:
            # No password supplied: store an unguessable random one so the
            # row never holds an empty hash.
            self.set_password(uuid.uuid4().hex)
    def __eq__(self, other):
        return self.id == other.id
    @hybrid_property
    def eppn(self):
        # eppn is stored and compared case-insensitively.
        return self._eppn.lower()
    @eppn.setter
    def eppn(self, value):
        self._eppn = value.lower()
    @eppn.comparator
    def eppn(cls):
        return CaseInsensitiveComparator(cls._eppn)
    @hybrid_property
    def email_id(self):
        # Returns None when no email address has been set.
        if self._email_id:
            return self._email_id.lower()
    @email_id.setter
    def email_id(self, value):
        if value:
            self._email_id = value.lower()
    @email_id.comparator
    def email_id(cls):
        return CaseInsensitiveComparator(cls._email_id)
    def delete(self):
        """Soft-delete: suffix eppn/email with a timestamp and deactivate.

        NOTE(review): strftime("%s") (epoch seconds) is a platform-specific
        extension -- confirm target platforms support it.
        """
        if self.is_deleted:
            return
        self.eppn = self.eppn + datetime.datetime.utcnow().strftime("-%s")
        # Email_id is also renamed to allow users
        # to be deleted and invited again with same email_id
        if self.email_id:
            self.email_id = self.email_id + datetime.datetime.utcnow().strftime("-%s")
        self.activation_tokens.delete()
        self.is_deleted = True
        self.is_active = False
    def set_password(self, password):
        self.password = bcrypt.generate_password_hash(password).decode('utf-8')
    def check_password(self, password):
        # Returns None (falsy) for accounts that are not allowed to log in.
        if self.can_login():
            return bcrypt.check_password_hash(self.password, password)
    def generate_auth_token(self, app_secret, expires_in=43200):
        """Return a signed, expiring token carrying this user's id (default 12 h)."""
        s = Serializer(app_secret, expires_in=expires_in)
        return s.dumps({'id': self.id}).decode('utf-8')
    def calculate_credits_spent(self):
        # Sum over all of the user's instances (errored ones report 0.0).
        return sum(instance.credits_spent() for instance in self.instances.all())
    def quota_exceeded(self):
        return self.calculate_credits_spent() >= self.credits_quota
    def can_login(self):
        return not self.is_deleted and self.is_active and not self.is_blocked
    @hybrid_property
    def managed_groups(self):
        # Groups in which this user carries the manager flag.
        groups = []
        group_user_objs = GroupUserAssociation.query.filter_by(user_id=self.id, manager=True).all()
        for group_user_obj in group_user_objs:
            groups.append(group_user_obj.group)
        return groups
    def unseen_notifications(self):
        """Return notifications broadcast after the user's last-seen timestamp."""
        q = Notification.query
        if self.latest_seen_notification_ts:
            q = q.filter(Notification.broadcasted > self.latest_seen_notification_ts)
        return q.all()
    @staticmethod
    def verify_auth_token(token, app_secret):
        """Decode a generate_auth_token() token; return the user or None."""
        s = Serializer(app_secret)
        try:
            data = s.loads(token)
        except:
            # NOTE(review): bare except also hides programming errors;
            # catching itsdangerous signature/expiry errors would be safer.
            return None
        user = User.query.get(data['id'])
        if user and user.can_login():
            return user
    def __repr__(self):
        return self.eppn
    def __hash__(self):
        return hash(self.eppn)
group_banned_user = db.Table( # Secondary Table for many-to-many mapping
'groups_banned_users',
db.Column('group_id', db.String(32), db.ForeignKey('groups.id')),
db.Column('user_id', db.String(32), db.ForeignKey('users.id')), db.PrimaryKeyConstraint('group_id', 'user_id')
)
class GroupUserAssociation(db.Model): # Association Object for many-to-many mapping
    """Group<->User link row carrying per-membership manager/owner flags."""
    __tablename__ = 'groups_users_association'
    group_id = db.Column(db.String(32), db.ForeignKey('groups.id'), primary_key=True)
    user_id = db.Column(db.String(32), db.ForeignKey('users.id'), primary_key=True)
    manager = db.Column(db.Boolean, default=False)
    owner = db.Column(db.Boolean, default=False)
    user = db.relationship("User", back_populates="groups")
    group = db.relationship("Group", back_populates="users")
class Group(db.Model):
    """A group of users; membership is held in GroupUserAssociation rows and
    new members join via the randomized join_code."""
    STATE_ACTIVE = 'active'
    STATE_ARCHIVED = 'archived'
    STATE_DELETED = 'deleted'
    VALID_STATES = (
        STATE_ACTIVE,
        STATE_ARCHIVED,
        STATE_DELETED
    )
    __tablename__ = 'groups'
    id = db.Column(db.String(32), primary_key=True)
    name = db.Column(db.String(32))
    _join_code = db.Column(db.String(64))
    description = db.Column(db.Text)
    # current_status when created is "active". Later there is option to be "archived".
    _current_status = db.Column('current_status', db.String(32), default='active')
    users = db.relationship("GroupUserAssociation", back_populates="group", lazy='dynamic', cascade="all, delete-orphan")
    banned_users = db.relationship('User', secondary=group_banned_user, backref=backref('banned_groups', lazy="dynamic"), lazy='dynamic')
    blueprints = db.relationship('Blueprint', backref='group', lazy='dynamic')
    def __init__(self, name):
        self.id = uuid.uuid4().hex
        self.name = name
        self.join_code = name
        self._current_status = Group.STATE_ACTIVE
    @hybrid_property
    def join_code(self):
        return self._join_code
    @join_code.setter
    def join_code(self, name):
        # Derive "<ascii-name>-<5 random chars>" from the group name:
        # spaces stripped, lower-cased, non-ASCII dropped, suffix drawn from
        # a cryptographically secure RNG.
        name = name.replace(' ', '').lower()
        ascii_name = name.encode('ascii', 'ignore').decode()
        random_chars = ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(5))
        self._join_code = ascii_name + '-' + random_chars
    @hybrid_property
    def current_status(self):
        return self._current_status
    @current_status.setter
    def current_status(self, value):
        # Only the states listed in VALID_STATES are accepted.
        if value in Group.VALID_STATES:
            self._current_status = value
        else:
            raise ValueError("'%s' is not a valid state for Groups" % value)
class Notification(db.Model):
    """Broadcast message shown to users; broadcast time is set on creation."""
    __tablename__ = 'notifications'
    id = db.Column(db.String(32), primary_key=True)
    broadcasted = db.Column(db.DateTime)
    subject = db.Column(db.String(MAX_NOTIFICATION_SUBJECT_LENGTH))
    message = db.Column(db.Text)
    def __init__(self):
        self.id = uuid.uuid4().hex
        self.broadcasted = datetime.datetime.utcnow()
class Keypair(db.Model):
    """SSH public key attached to a user; the key is validated on assignment."""
    __tablename__ = 'keypairs'
    id = db.Column(db.String(32), primary_key=True)
    user_id = db.Column(db.String(32), db.ForeignKey('users.id'))
    _public_key = db.Column(db.String(1025))
    def __init__(self):
        self.id = uuid.uuid4().hex
    @hybrid_property
    def public_key(self):
        return self._public_key
    @public_key.setter
    def public_key(self, value):
        # Reject anything that does not parse as an SSH public key.
        if not validate_ssh_pubkey(value):
            raise ValueError("Not a valid SSH public key")
        self._public_key = value
class ActivationToken(db.Model):
    """One-time account-activation token bound to a user."""
    __tablename__ = 'activation_tokens'
    token = db.Column(db.String(32), primary_key=True)
    user_id = db.Column(db.String(32), db.ForeignKey('users.id'))
    def __init__(self, user):
        self.token = uuid.uuid4().hex
        self.user_id = user.id
class Plugin(db.Model):
    """Provisioning plugin description; schema/form/model are stored as JSON
    text columns and (de)serialized through the hybrid properties below."""
    __tablename__ = 'plugins'
    id = db.Column(db.String(32), primary_key=True)
    name = db.Column(db.String(32))
    _schema = db.Column('schema', db.Text)
    _form = db.Column('form', db.Text)
    _model = db.Column('model', db.Text)
    def __init__(self):
        self.id = uuid.uuid4().hex
    @hybrid_property
    def schema(self):
        return load_column(self._schema)
    @schema.setter
    def schema(self, value):
        self._schema = json.dumps(value)
    @hybrid_property
    def form(self):
        return load_column(self._form)
    @form.setter
    def form(self, value):
        self._form = json.dumps(value)
    @hybrid_property
    def model(self):
        return load_column(self._model)
    @model.setter
    def model(self, value):
        self._model = json.dumps(value)
class BlueprintTemplate(db.Model):
    """Template from which Blueprints are derived; the JSON text columns are
    exposed as Python objects via hybrid properties backed by load_column()."""
    __tablename__ = 'blueprint_templates'
    id = db.Column(db.String(32), primary_key=True)
    name = db.Column(db.String(MAX_NAME_LENGTH))
    _config = db.Column('config', db.Text)
    is_enabled = db.Column(db.Boolean, default=False)
    plugin = db.Column(db.String(32), db.ForeignKey('plugins.id'))
    blueprints = db.relationship('Blueprint', backref='template', lazy='dynamic')
    _blueprint_schema = db.Column('blueprint_schema', db.Text)
    _blueprint_form = db.Column('blueprint_form', db.Text)
    _blueprint_model = db.Column('blueprint_model', db.Text)
    _allowed_attrs = db.Column('allowed_attrs', db.Text)
    def __init__(self):
        self.id = uuid.uuid4().hex
    @hybrid_property
    def config(self):
        return load_column(self._config)
    @config.setter
    def config(self, value):
        self._config = json.dumps(value)
    @hybrid_property
    def blueprint_schema(self):
        return load_column(self._blueprint_schema)
    @blueprint_schema.setter
    def blueprint_schema(self, value):
        self._blueprint_schema = json.dumps(value)
    @hybrid_property
    def blueprint_form(self):
        return load_column(self._blueprint_form)
    @blueprint_form.setter
    def blueprint_form(self, value):
        self._blueprint_form = json.dumps(value)
    @hybrid_property
    def blueprint_model(self):
        return load_column(self._blueprint_model)
    @blueprint_model.setter
    def blueprint_model(self, value):
        self._blueprint_model = json.dumps(value)
    @hybrid_property
    def allowed_attrs(self):
        return load_column(self._allowed_attrs)
    @allowed_attrs.setter
    def allowed_attrs(self, value):
        self._allowed_attrs = json.dumps(value)
class Blueprint(db.Model):
    """Launchable environment definition, owned by a Group and derived from a
    BlueprintTemplate; lifecycle is tracked via current_status."""
    STATE_ACTIVE = 'active'
    STATE_ARCHIVED = 'archived'
    STATE_DELETED = 'deleted'
    VALID_STATES = (
        STATE_ACTIVE,
        STATE_ARCHIVED,
        STATE_DELETED,
    )
    __tablename__ = 'blueprints'
    id = db.Column(db.String(32), primary_key=True)
    name = db.Column(db.String(MAX_NAME_LENGTH))
    template_id = db.Column(db.String(32), db.ForeignKey('blueprint_templates.id'))
    _config = db.Column('config', db.Text)
    is_enabled = db.Column(db.Boolean, default=False)
    expiry_time = db.Column(db.DateTime)
    gpu_enabled = db.Column(db.Boolean, default=False)
    instances = db.relationship('Instance', backref='blueprint', lazy='dynamic')
    group_id = db.Column(db.String(32), db.ForeignKey('groups.id'))
    # current_status when created is "active". Later there are options to be "archived" or "deleted".
    _current_status = db.Column('current_status', db.String(32), default='active')
    def __init__(self):
        self.id = uuid.uuid4().hex
        self._current_status = Blueprint.STATE_ACTIVE
    @hybrid_property
    def config(self):
        return load_column(self._config)
    @config.setter
    def config(self, value):
        self._config = json.dumps(value)
    # 'full_config' property of Blueprint model will take the template attributes into account too
    @hybrid_property
    def full_config(self):
        return get_full_blueprint_config(self)
    @hybrid_property
    def current_status(self):
        return self._current_status
    @current_status.setter
    def current_status(self, value):
        if value in Blueprint.VALID_STATES:
            self._current_status = value
        else:
            raise ValueError("'%s' is not a valid status for Blueprint" % value)
    @hybrid_property
    def maximum_lifetime(self):
        return get_blueprint_fields_from_config(self, 'maximum_lifetime')
    @hybrid_property
    def preallocated_credits(self):
        return get_blueprint_fields_from_config(self, 'preallocated_credits')
    @hybrid_property
    def cost_multiplier(self):
        return get_blueprint_fields_from_config(self, 'cost_multiplier')
    def cost(self, duration=None):
        """Credits for running `duration` seconds (default: maximum lifetime)."""
        if not duration:
            duration = self.maximum_lifetime
        # duration is in seconds; cost is charged per hour.
        return self.cost_multiplier * duration / 3600
    def __repr__(self):
        return self.name or "Unnamed blueprint"
class Instance(db.Model):
    """A provisioned environment instance for a user, created from a
    Blueprint; its lifecycle walks through the states in VALID_STATES."""
    STATE_QUEUEING = 'queueing'
    STATE_PROVISIONING = 'provisioning'
    STATE_RUNNING = 'running'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'
    STATE_FAILED = 'failed'
    VALID_STATES = (
        STATE_QUEUEING,
        STATE_PROVISIONING,
        STATE_RUNNING,
        STATE_DELETING,
        STATE_DELETED,
        STATE_FAILED,
    )
    __tablename__ = 'instances'
    id = db.Column(db.String(32), primary_key=True)
    user_id = db.Column(db.String(32), db.ForeignKey('users.id'))
    blueprint_id = db.Column(db.String(32), db.ForeignKey('blueprints.id'))
    name = db.Column(db.String(64), unique=True)
    public_ip = db.Column(db.String(64))
    client_ip = db.Column(db.String(64))
    provisioned_at = db.Column(db.DateTime)
    deprovisioned_at = db.Column(db.DateTime)
    errored = db.Column(db.Boolean, default=False)
    _state = db.Column('state', db.String(32))
    to_be_deleted = db.Column(db.Boolean, default=False)
    error_msg = db.Column(db.String(256))
    _instance_data = db.Column('instance_data', db.Text)
    def __init__(self, blueprint, user):
        self.id = uuid.uuid4().hex
        self.blueprint_id = blueprint.id
        self.blueprint = blueprint
        self.user_id = user.id
        self._state = Instance.STATE_QUEUEING
    def credits_spent(self, duration=None):
        """Credits consumed so far; errored instances cost nothing."""
        if self.errored:
            return 0.0
        if not duration:
            duration = self.runtime
        # Preallocated blueprints always charge for the full maximum lifetime.
        if self.blueprint.preallocated_credits:
            duration = self.blueprint.maximum_lifetime
        try:
            cost_multiplier = self.blueprint.cost_multiplier
        except:
            # NOTE(review): bare except; falls back to 1.0 if the blueprint
            # config cannot be read.
            cost_multiplier = 1.0
        return cost_multiplier * duration / 3600
    @hybrid_property
    def runtime(self):
        # Seconds between provisioning and deprovisioning (or now, if still up).
        if not self.provisioned_at:
            return 0.0
        if not self.deprovisioned_at:
            diff = datetime.datetime.utcnow() - self.provisioned_at
        else:
            diff = self.deprovisioned_at - self.provisioned_at
        return diff.total_seconds()
    @hybrid_property
    def instance_data(self):
        return load_column(self._instance_data)
    @instance_data.setter
    def instance_data(self, value):
        self._instance_data = json.dumps(value)
    @hybrid_property
    def state(self):
        return self._state
    @state.setter
    def state(self, value):
        if value in Instance.VALID_STATES:
            self._state = value
        else:
            raise ValueError("'%s' is not a valid state" % value)
    @staticmethod
    def generate_name(prefix):
        """Return a human-friendly random name like '<prefix>jane-the-happy'."""
        return '%s%s-the-%s' % (prefix, names.get_first_name().lower(), random.choice(NAME_ADJECTIVES))
class InstanceLog(db.Model):
    """Single log entry produced by an instance."""
    __tablename__ = 'instance_logs'
    id = db.Column(db.String(32), primary_key=True)
    instance_id = db.Column(db.String(32), db.ForeignKey('instances.id'), index=True, unique=False)
    log_level = db.Column(db.String(8))
    log_type = db.Column(db.String(64))
    timestamp = db.Column(db.Float)
    message = db.Column(db.Text)
    def __init__(self, instance_id):
        self.id = uuid.uuid4().hex
        self.instance_id = instance_id
class InstanceToken(db.Model):
    """Expiring access token for a single instance."""
    __tablename__ = 'instance_tokens'
    token = db.Column(db.String(32), primary_key=True)
    instance_id = db.Column(db.String(32), db.ForeignKey('instances.id'))
    expires_on = db.Column(db.DateTime)
    def __init__(self, instance_id, instance_seconds):
        self.token = uuid.uuid4().hex
        self.instance_id = instance_id
        # Token lifetime equals the instance's remaining seconds.
        self.expires_on = datetime.datetime.utcnow() + datetime.timedelta(seconds=instance_seconds)
    def __repr__(self):
        return self.token
class Lock(db.Model):
    """Database-backed advisory lock; the unique primary key makes a second
    insert of the same lock_id fail, which implements mutual exclusion."""
    __tablename__ = 'locks'
    lock_id = db.Column(db.String(64), primary_key=True, unique=True)
    acquired_at = db.Column(db.DateTime)
    def __init__(self, lock_id):
        self.lock_id = lock_id
        self.acquired_at = datetime.datetime.utcnow()
class NamespacedKeyValue(db.Model):
    """ Stores key/value pair data, separated by namespaces
        This model should be initialized by providing namespace and key as mandatory arguments.
        It is highly recommended to have a schema for the JSON value field,
        and provide it during model initialization.
    """
    __tablename__ = 'namespaced_keyvalues'
    namespace = db.Column(db.String(32), primary_key=True)
    key = db.Column(db.String(128), primary_key=True)
    _value = db.Column(db.Text)
    _schema = db.Column(db.Text)
    created_ts = db.Column(db.Float)
    updated_ts = db.Column(db.Float)
    def __init__(self, namespace, key, schema=None):
        self.namespace = namespace
        self.key = key
        self.schema = schema
    @classmethod
    def str_to_bool(cls, val):
        """ Convert the string into boolean.
            Useful when value comes from UI and becomes True even if False
            By default, this function shall return False
        """
        if val:
            val = val.lower()
            if val in ('true', u'true', '1'):
                return True
        return False
    @hybrid_property
    def schema(self):
        return load_column(self._schema)
    @schema.setter
    def schema(self, schema):
        self._schema = json.dumps(schema)
    @hybrid_property
    def value(self):
        return load_column(self._value)
    @value.setter
    def value(self, val):
        """Validate and coerce `val` against the stored schema, then store as JSON.

        Raises KeyError for a malformed schema or missing field, ValueError
        for an empty required field, TypeError for an uncoercible value.
        """
        if self.schema:
            try:
                schema_obj = self.schema['properties']
                required_fields = self.schema['required']
            except (KeyError, TypeError):
                # Narrowed from a bare `except:`: missing keys or a non-dict
                # schema; anything else should propagate.
                raise KeyError('Incorrect Schema')
            for field in schema_obj:
                field_type = schema_obj[field]['type']
                if field not in val:
                    raise KeyError('Field %s does not exist in value object' % field)
                # 0 and False are legal values for required fields.
                if not val[field] and field in required_fields and val[field] not in (0, False):
                    raise ValueError('Empty value found for required field %s' % field)
                try:
                    if field_type == "integer":
                        val[field] = int(val[field])
                    elif field_type == "boolean":
                        if type(val[field]) in (six.text_type, str):
                            val[field] = NamespacedKeyValue.str_to_bool(val[field])
                        else:
                            val[field] = bool(val[field])
                    else:
                        val[field] = str(val[field])
                except (TypeError, ValueError):
                    # Narrowed from a bare `except:`: only conversion
                    # failures are turned into the TypeError below.
                    raise TypeError('Field %s should be of type %s, found %s ' % (field, field_type, type(val[field])))
        self._value = json.dumps(val)
| 8,739 | 9,794 | 502 |
d9b5dcb62c6ca6c4af81cd0beadf2ae46d63db99 | 880 | py | Python | tests/test_templates.py | HolgerPeters/pyscaffold | 04f3435fbe882041bf5860e164d07f8bd148a764 | [
"BSD-3-Clause"
] | null | null | null | tests/test_templates.py | HolgerPeters/pyscaffold | 04f3435fbe882041bf5860e164d07f8bd148a764 | [
"BSD-3-Clause"
] | null | null | null | tests/test_templates.py | HolgerPeters/pyscaffold | 04f3435fbe882041bf5860e164d07f8bd148a764 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pyscaffold import templates
__author__ = "Florian Wilhelm"
__copyright__ = "Blue Yonder"
__license__ = "new BSD"
| 25.142857 | 69 | 0.680682 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from pyscaffold import templates
__author__ = "Florian Wilhelm"
__copyright__ = "Blue Yonder"
__license__ = "new BSD"
def test_get_template():
template = templates.get_template("setup_py")
content = template.safe_substitute()
assert content.split(os.linesep, 1)[0] == '#!/usr/bin/env python'
def test_all_licenses():
args = type("Namespace", (object,), dict())
args.email = "test@user"
args.project = "my_project"
args.author = "myself"
args.year = 1832
for license in templates.licenses.keys():
args.license = license
assert templates.license(args)
def test_best_fit_license():
txt = "new_bsd"
assert templates.best_fit_license(txt) == "new-bsd"
for license in templates.licenses.keys():
assert templates.best_fit_license(license) == license
| 631 | 0 | 69 |
bd3ebf4cefc09856f375b52c8e164c6205ba4d61 | 10,982 | py | Python | modules/visiualize.py | J0nasW/Bachelorarbeit | 37052be075713f2016d42782397ff875e3e982f8 | [
"MIT"
] | null | null | null | modules/visiualize.py | J0nasW/Bachelorarbeit | 37052be075713f2016d42782397ff875e3e982f8 | [
"MIT"
] | null | null | null | modules/visiualize.py | J0nasW/Bachelorarbeit | 37052be075713f2016d42782397ff875e3e982f8 | [
"MIT"
] | null | null | null | """
VISIUALIZATION MODULE loading Parameter Matrices
CALL BY: <visiualize.py>
RETURN: Environment simulation (animated) & Plots
INFO: This Module can load a specific File Dump (cPickle) and visiualize the containig matrices onto a OpenAI Gym Environment
"""
# Some dependencies
import numpy as np
import matplotlib.pyplot as plt
import hickle as hkl
import gym
from .lif import I_syn_calc, I_gap_calc, U_neuron_calc
from .parameters import *
from .random_search_v2 import compute as compute_v2
from .random_search_v2 import observe
from .weights_nn import compute as compute_with_weights
# Initializing OpenAI Environments------------------------------------------------------
env = gym.make('CartPole-v0')
env.reset()
env_vis = []
#---------------------------------------------------------------------------------------
# Initialization----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Append Function---------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Plot Function-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# OpenAI Gym--------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
# Main Function-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
if __name__=="__main__":
main()
| 34.31875 | 236 | 0.583956 | """
VISIUALIZATION MODULE loading Parameter Matrices
CALL BY: <visiualize.py>
RETURN: Environment simulation (animated) & Plots
INFO: This Module can load a specific File Dump (cPickle) and visiualize the containig matrices onto a OpenAI Gym Environment
"""
# Some dependencies
import numpy as np
import matplotlib.pyplot as plt
import hickle as hkl
import gym
from .lif import I_syn_calc, I_gap_calc, U_neuron_calc
from .parameters import *
from .random_search_v2 import compute as compute_v2
from .random_search_v2 import observe
from .weights_nn import compute as compute_with_weights
# Initializing OpenAI Environments------------------------------------------------------
env = gym.make('CartPole-v0')
env.reset()
env_vis = []
#---------------------------------------------------------------------------------------
# Initialization----------------------------------------------------------------------------
def vis_initialize(Default_U_leak):
    """Reset the four neuron and four sensor potentials to the leak voltage.

    `x` and `u` are module-level arrays (brought in via the star import from
    .parameters -- presumably; confirm against that module).
    """
    for idx in range(4):
        x[idx] = Default_U_leak
        u[idx] = Default_U_leak
def initialize(Default_U_leak):
    """Reset all module-level simulation state to start a fresh episode.

    Sets the x/u potentials to the leak voltage, re-creates every recording
    array (neuron traces, spikes, currents, actions/angles) and zeroes the
    episode counters.
    """
    # Initializing Neurons and Sensors------------------------------------------------------
    for i in range(0,4):
        x[i] = Default_U_leak
    for i in range(0,4):
        u[i] = Default_U_leak
    global AVA, AVD, PVC, AVB, PVD, PLM, AVM, ALM, AVA_spike, AVD_spike, PVC_spike, AVB_spike, I_PVC, I_AVD, I_AVA, I_AVB, actions_arr, angles_arr, angle_velocity_arr, totalreward, done, info, actions
    # Inter-neuron traces start at the leak voltage; sensor traces start empty.
    AVA = np.array([Default_U_leak])
    AVD = np.array([Default_U_leak])
    PVC = np.array([Default_U_leak])
    AVB = np.array([Default_U_leak])
    PVD = np.array([])
    PLM = np.array([])
    AVM = np.array([])
    ALM = np.array([])
    AVA_spike = np.array([])
    AVD_spike = np.array([])
    PVC_spike = np.array([])
    AVB_spike = np.array([])
    I_PVC = np.array([])
    I_AVD = np.array([])
    I_AVA = np.array([])
    I_AVB = np.array([])
    actions_arr = np.array([])
    angles_arr = np.array([])
    angle_velocity_arr = np.array([])
    #---------------------------------------------------------------------------------------
    totalreward = 0
    done = 0
    info = 0
    actions = 0
#-------------------------------------------------------------------------------------------
# Append Function---------------------------------------------------------------------------
def arr(x, u, fire, I_all):
    """Append the current simulation step to the module-level recording arrays.

    :param x: inter-neuron potentials, indexed AVA=0, AVD=1, PVC=2, AVB=3.
    :param u: sensor potentials, indexed PVD=0, PLM=1, AVM=2, ALM=3.
    :param fire: spike flags, same ordering as x.
    :param I_all: neuron currents, same ordering as x.
    """
    global AVA, AVD, PVC, DVA, AVB, PVD, PLM, AVM, ALM, AVA_spike, AVD_spike, PVC_spike, AVB_spike, I_PVC, I_AVD, I_AVA, I_AVB
    AVA = np.append(AVA, x[0])
    AVD = np.append(AVD, x[1])
    PVC = np.append(PVC, x[2])
    AVB = np.append(AVB, x[3])
    PVD = np.append(PVD, u[0])
    PLM = np.append(PLM, u[1])
    AVM = np.append(AVM, u[2])
    ALM = np.append(ALM, u[3])
    AVA_spike = np.append(AVA_spike, fire[0]) # Reverse lokomotion
    AVD_spike = np.append(AVD_spike, fire[1]) # Reverse lokomotion
    PVC_spike = np.append(PVC_spike, fire[2]) # Reverse lokomotion
    AVB_spike = np.append(AVB_spike, fire[3]) # Forward lokomotion
    I_AVA = np.append(I_AVA, I_all[0])
    I_AVD = np.append(I_AVD, I_all[1])
    I_PVC = np.append(I_PVC, I_all[2])
    I_AVB = np.append(I_AVB, I_all[3])
#-------------------------------------------------------------------------------------------
# Plot Function-----------------------------------------------------------------------------
def plot():
    """Render three figures from the recorded episode: (1) sensor and
    inter-neuron voltage traces, (2) neuron currents, (3) actions and pole
    angle/velocity. Blocks until the matplotlib windows are closed."""
    #plt.rc('text', usetex=True)
    plt.rc('font', family='serif', size=14)
    # Figure 1: voltage traces.
    plt.figure(1)
    #plt.suptitle('TW Circuit Simulator for biological neural Networks', fontsize=16)
    plt.subplot(121)
    plt.title('Sensory Neurons', fontsize=18)
    plt.plot(PLM, '-y', label='PLM (Phi)', linewidth=1)
    plt.plot(AVM, '-g', label='AVM (-Phi)', linewidth=1)
    plt.plot(ALM, '-r', label='ALM (Phi dot)', linewidth=1)
    plt.plot(PVD, '-b', label='PVD (-Phi dot)', linewidth=1)
    plt.xlabel('t in 1/10 Sek')
    plt.ylabel('u in mV')
    plt.legend(loc='upper left')
    plt.subplot(122)
    plt.title('Inter Neurons', fontsize=18)
    plt.plot(AVA, '-b', label='AVA (REV)', linewidth=2)
    plt.plot(AVD, '-y', label='AVD (REV)', linewidth=0.7)
    plt.plot(PVC, '-g', label='PVC (FWD)', linewidth=0.7)
    plt.plot(AVB, '-k', label='AVB (FWD)', linewidth=2)
    plt.xlabel('t in 1/10 Sek')
    plt.ylabel('u in mV')
    plt.legend(loc='upper left')
    # Figure 2: per-neuron currents.
    plt.figure(2)
    #plt.suptitle('Neuron Currents', fontsize=16)
    plt.subplot(221)
    plt.title('PVC', fontsize=22)
    plt.plot(I_PVC, '-r', label='PVC', linewidth=1)
    plt.subplot(222)
    plt.title('AVD', fontsize=22)
    plt.plot(I_AVD, '-r', label='AVD', linewidth=1)
    plt.subplot(223)
    plt.title('AVA', fontsize=22)
    plt.plot(I_AVA, '-r', label='AVA', linewidth=0.5)
    plt.xlabel('t')
    plt.ylabel('i in mA')
    plt.subplot(224)
    plt.title('AVB', fontsize=22)
    plt.plot(I_AVB, '-r', label='AVB', linewidth=0.5)
    # Figure 3: chosen actions vs. pole angle/velocity.
    plt.figure(3)
    #plt.suptitle('Action and Angle of this Simulation', fontsize=16)
    plt.plot(actions_arr, '-r', label='Actions', linewidth=1)
    plt.plot(angles_arr, '-b', label='Angle [deg]', linewidth=1)
    plt.plot(angle_velocity_arr, '-g', label='Angle Velocity [m/s]', linewidth=1)
    plt.xlabel('t')
    plt.ylabel('Action / Angle in Deg')
    plt.legend(loc='upper left')
    plt.show()
#-------------------------------------------------------------------------------------------
def import_parameters(parameter_matrices):
    """Load the eight network parameter matrices saved with hickle.

    Returns, in saved order: w_A, w_B, w_B_gap, sig_A, sig_B, C_m,
    G_leak, U_leak.
    """
    stored = hkl.load(parameter_matrices)
    return tuple(stored[i] for i in range(8))
def import_weights(load_weights):
    """Load the pair of weight matrices (A, B) saved with hickle."""
    stored = hkl.load(load_weights)
    return stored[0], stored[1]
# OpenAI Gym--------------------------------------------------------------------------------
def run_episode(env, fire):
    """Advance the gym environment one step based on the spike flags.

    fire -- spike flags [AVA, AVD, PVC, AVB]: a firing AVA (reverse)
    pushes LEFT (action 0), a firing AVB (forward) pushes RIGHT
    (action 1).  Returns (totalreward, done, uncertain).
    """
    global observation, reward, done, info, totalreward, action, env_vis, uncertain, actions_arr, angles_arr, angle_velocity_arr, actions
    # Record the rendered frame for the notebook animation (env_render).
    env_vis.append(env.render(mode = 'rgb_array'))
    # - action = 0 LEFT - action = 1 RIGHT
    if fire[0] == 1: # AVA (REV) is firing
        action = 0
        observation, reward, done, info = env.step(action)
        #print 'LEFT'
    elif fire[3] == 1: # AVB (FWD) is firing
        action = 1
        observation, reward, done, info = env.step(action)
        #print 'RIGHT'
    else:
        # Neither decision neuron fired: count it and repeat the previous
        # (global) action.
        uncertain +=1
        observation, reward, done, info = env.step(action)
    totalreward += reward
    angle, angle_velocity = observe(observation)
    if done:
        action = 0
        actions = 0
    # Running action balance: decremented for LEFT, incremented for RIGHT.
    if action == 0:
        actions -= 1
    elif action == 1:
        actions += 1
    actions_arr = np.append(actions_arr, actions)
    angles_arr = np.append(angles_arr, np.absolute(angle))
    angle_velocity_arr = np.append(angle_velocity_arr, np.absolute(angle_velocity))
    return totalreward, done, uncertain
def env_render(env_vis):
    """Animate the recorded environment frames inside the notebook.

    env_vis -- list of RGB frame arrays captured during the episode.
    """
    plt.figure()
    # Renamed from `plot`: the old local shadowed the module-level plot()
    # function defined above.
    frame_img = plt.imshow(env_vis[0])
    plt.axis('off')

    def animate(i):
        # FuncAnimation calls this once per frame index.
        frame_img.set_data(env_vis[i])

    anim = anm.FuncAnimation(plt.gcf(), animate, frames=len(env_vis), interval=20, repeat=True, repeat_delay=20)
    display(display_animation(anim, default_mode='loop'))
#-------------------------------------------------------------------------------------------
# Main Function-----------------------------------------------------------------------------
#-------------------------------------------------------------------------------------------
def main(parameter_matrices, runtime):
    """Simulate the circuit with parameters from *parameter_matrices*
    (a hickle file) and drive the gym environment from t0 to *runtime*
    in delta_t steps, then render the animation and the summary plots.
    """
    global x, u, env, action, uncertain
    observation = env.reset()
    action = 0
    actions = 0
    episodes = 0
    uncertain = 0
    initialize(Default_U_leak) # Initializing all Interneurons with the desired leakage voltage
    w_A_rnd, w_B_rnd, w_B_gap_rnd, sig_A_rnd, sig_B_rnd, C_m_rnd, G_leak_rnd, U_leak_rnd = import_parameters(parameter_matrices)
    for t in np.arange(t0,runtime,delta_t):
        x, u, fire, I_syn, I_gap = compute_v2(x, u, w_A_rnd, w_B_rnd, w_B_gap_rnd, sig_A_rnd, sig_B_rnd, C_m_rnd, G_leak_rnd, U_leak_rnd) # Compute the next Interneuron Voltages along with a possible "fire" Event
        # NOTE(review): compute_v2 is invoked twice per loop iteration; if
        # this is not a deliberate double integration sub-step, one call
        # should be removed (main_with_weights repeats the same pattern).
        x, u, fire, I_syn, I_gap = compute_v2(x, u, w_A_rnd, w_B_rnd, w_B_gap_rnd, sig_A_rnd, sig_B_rnd, C_m_rnd, G_leak_rnd, U_leak_rnd) # Compute the next Interneuron Voltages along with a possible "fire" Event
        I_all = np.add(I_syn, I_gap)
        arr(x, u, fire, I_all) # Storing Information for graphical analysis
        # OpenAI GYM PART----------------------------------
        totalreward, done, uncertain = run_episode(env, fire)
        if done:
            env.reset()
            vis_initialize(Default_U_leak)
            episodes = episodes + 1
    print ("Did",episodes,"Episodes and was",uncertain,"out of",len(actions_arr),"times uncertain!")
    env_render(env_vis)
    plot() # Plotting everything using matplotlib
#-------------------------------------------------------------------------------------------
def main_with_weights(load_parameters, load_weights, runtime):
    """Like main(), but additionally loads pre-trained A/B weight
    matrices and steps the circuit with compute_with_weights().
    """
    global x, u, env, action, uncertain
    observation = env.reset()
    action = 0
    actions = 0
    episodes = 0
    uncertain = 0
    initialize(Default_U_leak) # Initializing all Interneurons with the desired leakage voltage
    w_A_rnd, w_B_rnd, w_B_gap_rnd, sig_A_rnd, sig_B_rnd, C_m_rnd, G_leak_rnd, U_leak_rnd = import_parameters(load_parameters)
    A_rnd, B_rnd = import_weights(load_weights)
    for t in np.arange(t0,runtime,delta_t):
        x, u, fire, I_syn, I_gap = compute_with_weights(x, u, w_A_rnd, w_B_rnd, w_B_gap_rnd, sig_A_rnd, sig_B_rnd, C_m_rnd, G_leak_rnd, U_leak_rnd, A_rnd, B_rnd) # Compute the next Interneuron Voltages along with a possible "fire" Event
        # NOTE(review): duplicated call -- same pattern as in main();
        # confirm whether double-stepping per iteration is intended.
        x, u, fire, I_syn, I_gap = compute_with_weights(x, u, w_A_rnd, w_B_rnd, w_B_gap_rnd, sig_A_rnd, sig_B_rnd, C_m_rnd, G_leak_rnd, U_leak_rnd, A_rnd, B_rnd) # Compute the next Interneuron Voltages along with a possible "fire" Event
        I_all = np.add(I_syn, I_gap)
        arr(x, u, fire, I_all) # Storing Information for graphical analysis
        # OpenAI GYM PART----------------------------------
        totalreward, done, uncertain = run_episode(env, fire)
        if done:
            env.reset()
            vis_initialize(Default_U_leak)
            episodes = episodes + 1
    print ("Did",episodes,"Episodes and was",uncertain,"out of",len(actions_arr),"times uncertain!")
    env_render(env_vis)
    plot() # Plotting everything using matplotlib
if __name__=="__main__":
    # NOTE(review): main() is defined as main(parameter_matrices, runtime),
    # so this zero-argument call raises TypeError when the file is executed
    # directly -- pass the hickle parameter file and a runtime, or give the
    # parameters defaults.
    main()
| 8,817 | 0 | 253 |
ee98387fd4e1ddaf19465b45c6b6065f7cae2b27 | 778 | py | Python | src/SocialNetwork_API/services/data.py | mungpham/mungpham | 3545dafdb498503d2f138d4b7515a7ae8f195994 | [
"MIT"
] | null | null | null | src/SocialNetwork_API/services/data.py | mungpham/mungpham | 3545dafdb498503d2f138d4b7515a7ae8f195994 | [
"MIT"
] | null | null | null | src/SocialNetwork_API/services/data.py | mungpham/mungpham | 3545dafdb498503d2f138d4b7515a7ae8f195994 | [
"MIT"
] | null | null | null | from django.db import transaction
from rest_framework.generics import get_object_or_404
from SocialNetwork_API.models import *
from SocialNetwork_API.services.base import BaseService
| 23.575758 | 55 | 0.61054 | from django.db import transaction
from rest_framework.generics import get_object_or_404
from SocialNetwork_API.models import *
from SocialNetwork_API.services.base import BaseService
class DataService(BaseService):
    """Service-layer helper for persisting Data records."""

    @classmethod
    def save(cls, request_data, instance=None):
        """Create or update a Data row from a dict of field values.

        request_data -- field name -> value mapping; a 'file' entry is
                        popped out first and, when present, aborts the save.
        instance     -- existing Data row to update; a fresh Data() is
                        created when omitted.

        Returns the saved Data instance, or None when a file was attached
        (file handling is not implemented here).  Re-raises any exception
        after logging it.
        """
        try:
            file = request_data.pop('file', None)
            data = instance if instance else Data()
            for key in request_data:
                setattr(data, key, request_data[key])

            # File uploads are not handled yet; bail out before writing.
            # (Removed the unused `file_info = None` local.)
            if file:
                return None

            with transaction.atomic():
                data.save()
            return data
        except Exception as exception:
            cls.log_exception(exception)
            # Bare raise keeps the original traceback intact.
            raise
c060d270e4a839f2d102a43b53b00c4b5cafc8bf | 194 | py | Python | scripts/item/consume_2435432.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/item/consume_2435432.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/item/consume_2435432.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | [
"MIT"
] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z | # Purple Damage Skin
success = sm.addDamageSkin(2435432)
if success:
sm.chat("The Purple Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2435432)
| 32.333333 | 94 | 0.747423 | # Purple Damage Skin
success = sm.addDamageSkin(2435432)
if success:
sm.chat("The Purple Damage Skin has been added to your account's damage skin collection.")
# sm.consumeItem(2435432)
| 0 | 0 | 0 |
a0b682c4d267ee26a88f0f1bef70201ce07d02df | 128 | py | Python | Taekwon/Python/baseGrammar/codeup044.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | 2 | 2021-09-27T19:10:36.000Z | 2021-11-09T05:40:39.000Z | Taekwon/Python/baseGrammar/codeup044.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | 1 | 2021-11-15T14:56:54.000Z | 2021-11-15T14:56:54.000Z | Taekwon/Python/baseGrammar/codeup044.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | null | null | null | a, b = input().split()
# Convert the two whitespace-separated tokens read above to integers.
a, b = int(a), int(b)
# One arithmetic result per line: sum, difference, product,
# floor quotient, remainder.
for value in (a + b, a - b, a * b, a // b, a % b):
    print(value)
# True division, rounded to two decimal places.
print(f"{a / b:.2f}")
| 11.636364 | 25 | 0.5625 | a, b = input().split()
a = int(a)
b = int(b)
print(a+b)
print(a-b)
print(a*b)
print(a//b)
print(a%b)
print(format(a/b, ".2f"))
| 0 | 0 | 0 |
216e26713a4f19875917c6508f940c1385c14fc9 | 5,521 | py | Python | notebooks/sampling.py | rodluger/pymc3-ext | 3026dbfa88a63f56136ecce7c18f3a157faf91d1 | [
"MIT"
] | 15 | 2020-10-21T21:14:42.000Z | 2022-02-02T02:16:34.000Z | notebooks/sampling.py | rodluger/pymc3-ext | 3026dbfa88a63f56136ecce7c18f3a157faf91d1 | [
"MIT"
] | 22 | 2020-09-26T14:44:16.000Z | 2022-02-16T12:14:45.000Z | notebooks/sampling.py | rodluger/pymc3-ext | 3026dbfa88a63f56136ecce7c18f3a157faf91d1 | [
"MIT"
] | 4 | 2021-01-25T20:44:07.000Z | 2021-11-18T14:57:23.000Z | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %%
# %run notebook_setup
# %% [markdown]
# # Sampling
#
# `pymc3-ext` comes with some functions to make sampling more flexible in some cases and improve the default parameter choices for the types of problems encountered in astrophysics.
# These features are accessed through the `pymc3_ext.sample` function that behaves mostly like the `pymc3.sample` function with a couple of different arguments.
# The two main differences for all users is that the `pymc3_ext.sample` function defaults to a target acceptance fraction of `0.9` (which will be better for many models in astrophysics) and to adapting a full dense mass matrix (instead of diagonal).
# Therefore, if there are covariances between parameters, this method will generally perform better than the PyMC3 defaults.
#
# ## Correlated parameters
#
# A thorough discussion of this [can be found elsewhere online](https://dfm.io/posts/pymc3-mass-matrix/), but here is a simple demo where we sample a covariant Gaussian using `pymc3_ext.sample`.
#
# First, we generate a random positive definite covariance matrix for the Gaussian:
# %%
import numpy as np
# Build a random lower-triangular Cholesky factor L and its covariance.
ndim = 5

np.random.seed(42)
L = np.random.randn(ndim, ndim)
# Strictly positive diagonal, zeroed upper triangle -> valid Cholesky factor.
np.fill_diagonal(L, 0.1 * np.exp(np.diag(L)))
L = np.tril(L)
cov = L @ L.T
# %% [markdown]
# And then we can set up this model using PyMC3:
# %%
import pymc3 as pm

with pm.Model() as model:
    # Zero-mean MvNormal parameterised by the Cholesky factor L built above.
    pm.MvNormal("x", mu=np.zeros(ndim), chol=L, shape=ndim)
# %% [markdown]
# If we sample this using PyMC3 default sampling method, things don't go so well (we're only doing a small number of steps because we don't want it to take forever, but things don't get better if you run for longer!):
# %%
with model:
    # Stock PyMC3 sampler (diagonal mass matrix) -- shown below to
    # struggle on this correlated target.
    trace = pm.sample(tune=500, draws=500, chains=2, cores=2)
# %% [markdown]
# But, we can use `pymc3_ext.sample` as a drop in replacement to get much better performance:
# %%
import pymc3_ext as pmx

with model:
    # Drop-in replacement that adapts a full dense mass matrix.
    tracex = pmx.sample(tune=1000, draws=1000, chains=2, cores=2)
# %% [markdown]
# As you can see, this is substantially faster (even though we generated twice as many samples).
#
# We can compare the sampling summaries to confirm that the default method did not produce reliable results in this case, while the `pymc3_ext` version did:
# %%
import arviz as az

# Sampling diagnostics for the default PyMC3 run.
az.summary(trace).head()
# %%
# Diagnostics for the pymc3_ext run, for comparison.
az.summary(tracex).head()
# %% [markdown]
# In this particular case, you could get similar performance using the `init="adapt_full"` argument to the `sample` function in PyMC3, but the implementation in `pymc3-ext` is somewhat more flexible.
# Specifically, `pymc3_ext` implements a tuning procedure that is more similar to [the one implemented by the Stan project](https://mc-stan.org/docs/2_24/reference-manual/hmc-algorithm-parameters.html).
# The relevant parameters are:
#
# - `warmup_window`: The length of the initial "fast" window. This is called "initial buffer" in the Stan docs.
# - `adapt_window`: The length of the initial "slow" window. This is called "window" in the Stan docs.
# - `cooldown_window`: The length of the final "fast" window. This is called "term buffer" in the Stan docs.
#
# Unlike the Stan implementation, here we have support for updating the mass matrix estimate every `recompute_interval` steps based on the previous window and all the steps in the current window so far.
# This can improve warm up performance substantially so the default value is `1`, but this might be intractable for high dimensional models.
# To only recompute the estimate at the end of each window, set `recompute_interval=0`.
#
# If you run into numerical issues, you can try increasing `adapt_window` or use the `regularization_steps`and `regularization_variance` to regularize the mass matrix estimator.
# The `regularization_steps` parameter sets the effective number of steps that are used for regularization and `regularization_variance` is the effective variance for those steps.
# %% [markdown]
# ## Parameter groups
#
# If you are fitting a model with a large number of parameters, it might not be computationally or numerically tractable to estimate the full dense mass matrix.
# But, sometimes you might know something about the covariance structure of the problem that you can exploit.
# Perhaps some parameters are correlated with each other, but not with others.
# In this case, you can use the `parameter_groups` argument to exploit this structure.
#
# Here is an example where `x`, `y`, and `z` are all independent with different covariance structure.
# We can take advantage of this structure using `pmx.ParameterGroup` specifications in the `parameter_groups` argument.
# Note that by default each group will internally estimate a dense mass matrix, but here we specifically only estimate a diagonal mass matrix for `z`.
# %%
with pm.Model():
    # x and y are each internally correlated (same Cholesky factor L);
    # z is fully independent.
    x = pm.MvNormal("x", mu=np.zeros(ndim), chol=L, shape=ndim)
    y = pm.MvNormal("y", mu=np.zeros(ndim), chol=L, shape=ndim)
    z = pm.Normal("z", shape=ndim)  # Uncorrelated

    tracex2 = pmx.sample(
        tune=1000,
        draws=1000,
        chains=2,
        cores=2,
        # One mass-matrix block per independent group; dense by default,
        # explicitly diagonal for the uncorrelated z.
        parameter_groups=[
            [x],
            [y],
            pmx.ParameterGroup([z], "diag"),
        ],
    )
# %%
| 42.469231 | 249 | 0.727042 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.6.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# %matplotlib inline
# %%
# %run notebook_setup
# %% [markdown]
# # Sampling
#
# `pymc3-ext` comes with some functions to make sampling more flexible in some cases and improve the default parameter choices for the types of problems encountered in astrophysics.
# These features are accessed through the `pymc3_ext.sample` function that behaves mostly like the `pymc3.sample` function with a couple of different arguments.
# The two main differences for all users is that the `pymc3_ext.sample` function defaults to a target acceptance fraction of `0.9` (which will be better for many models in astrophysics) and to adapting a full dense mass matrix (instead of diagonal).
# Therefore, if there are covariances between parameters, this method will generally perform better than the PyMC3 defaults.
#
# ## Correlated parameters
#
# A thorough discussion of this [can be found elsewhere online](https://dfm.io/posts/pymc3-mass-matrix/), but here is a simple demo where we sample a covariant Gaussian using `pymc3_ext.sample`.
#
# First, we generate a random positive definite covariance matrix for the Gaussian:
# %%
import numpy as np
ndim = 5
np.random.seed(42)
L = np.random.randn(ndim, ndim)
L[np.diag_indices_from(L)] = 0.1 * np.exp(L[np.diag_indices_from(L)])
L[np.triu_indices_from(L, 1)] = 0.0
cov = np.dot(L, L.T)
# %% [markdown]
# And then we can set up this model using PyMC3:
# %%
import pymc3 as pm
with pm.Model() as model:
pm.MvNormal("x", mu=np.zeros(ndim), chol=L, shape=ndim)
# %% [markdown]
# If we sample this using PyMC3 default sampling method, things don't go so well (we're only doing a small number of steps because we don't want it to take forever, but things don't get better if you run for longer!):
# %%
with model:
trace = pm.sample(tune=500, draws=500, chains=2, cores=2)
# %% [markdown]
# But, we can use `pymc3_ext.sample` as a drop in replacement to get much better performance:
# %%
import pymc3_ext as pmx
with model:
tracex = pmx.sample(tune=1000, draws=1000, chains=2, cores=2)
# %% [markdown]
# As you can see, this is substantially faster (even though we generated twice as many samples).
#
# We can compare the sampling summaries to confirm that the default method did not produce reliable results in this case, while the `pymc3_ext` version did:
# %%
import arviz as az
az.summary(trace).head()
# %%
az.summary(tracex).head()
# %% [markdown]
# In this particular case, you could get similar performance using the `init="adapt_full"` argument to the `sample` function in PyMC3, but the implementation in `pymc3-ext` is somewhat more flexible.
# Specifically, `pymc3_ext` implements a tuning procedure that it more similar to [the one implemented by the Stan project](https://mc-stan.org/docs/2_24/reference-manual/hmc-algorithm-parameters.html).
# The relevant parameters are:
#
# - `warmup_window`: The length of the initial "fast" window. This is called "initial buffer" in the Stan docs.
# - `adapt_window`: The length of the initial "slow" window. This is called "window" in the Stan docs.
# - `cooldown_window`: The length of the final "fast" window. This is called "term buffer" in the Stan docs.
#
# Unlike the Stan implementation, here we have support for updating the mass matrix estimate every `recompute_interval` steps based on the previous window and all the steps in the current window so far.
# This can improve warm up performance substantially so the default value is `1`, but this might be intractable for high dimensional models.
# To only recompute the estimate at the end of each window, set `recompute_interval=0`.
#
# If you run into numerical issues, you can try increasing `adapt_window` or use the `regularization_steps`and `regularization_variance` to regularize the mass matrix estimator.
# The `regularization_steps` parameter sets the effective number of steps that are used for regularization and `regularization_variance` is the effective variance for those steps.
# %% [markdown]
# ## Parameter groups
#
# If you are fitting a model with a large number of parameters, it might not be computationally or numerically tractable to estimate the full dense mass matrix.
# But, sometimes you might know something about the covariance structure of the problem that you can exploit.
# Perhaps some parameters are correlated with each other, but not with others.
# In this case, you can use the `parameter_groups` argument to exploit this structure.
#
# Here is an example where `x`, `y`, and `z` are all independent with different covariance structure.
# We can take advantage of this structure using `pmx.ParameterGroup` specifications in the `parameter_groups` argument.
# Note that by default each group will internally estimate a dense mass matrix, but here we specifically only estimate a diagonal mass matrix for `z`.
# %%
with pm.Model():
x = pm.MvNormal("x", mu=np.zeros(ndim), chol=L, shape=ndim)
y = pm.MvNormal("y", mu=np.zeros(ndim), chol=L, shape=ndim)
z = pm.Normal("z", shape=ndim) # Uncorrelated
tracex2 = pmx.sample(
tune=1000,
draws=1000,
chains=2,
cores=2,
parameter_groups=[
[x],
[y],
pmx.ParameterGroup([z], "diag"),
],
)
# %%
| 0 | 0 | 0 |
e50200f3384b4a1649a8a43e5db6f74a5ac48b44 | 2,643 | py | Python | credit_default/build_model.py | sandymule/Credit-Card-Default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | 1 | 2017-05-20T06:08:05.000Z | 2017-05-20T06:08:05.000Z | credit_default/build_model.py | sandymule/credit-card-default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | null | null | null | credit_default/build_model.py | sandymule/credit-card-default | c9d67feffa65fb7aad514bd9c1991766e8e2777b | [
"MIT"
] | 2 | 2017-05-20T06:08:25.000Z | 2019-05-18T19:59:31.000Z | # old code from iris example
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.datasets import load_iris
# from sklearn.externals import joblib
#import all the needed imports
import numpy as np
import pandas as pd
import os
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.learning_curve import learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
# old code from iris example for reference
# if __name__ == "__main__":
# # Load Iris Data
# iris_data = load_iris()
# features = iris_data.data
# feature_names = iris_data.feature_names
# target = iris_data.target
# target_names = iris_data.target_names
#
# knn = KNeighborsClassifier(n_neighbors=3) # replace with your own ML model here
# knn.fit(features, target)
#
# joblib.dump(knn, 'models/iris_model.pkl')
if __name__ == "__main__":
#load data
df = pd.read_csv('default_of_credit_card_clients.csv')
df = df.dropna()
df = df.drop('ID', axis = 1)
df['default payment next month'] = df['default payment next month'].replace(to_replace=0, value="Paid")
df['default payment next month'] = df['default payment next month'].replace(to_replace=1, value="Default")
df['LIMIT_BAL'] = df['LIMIT_BAL']/1000
#makes the percentage columns I was talking about - pct paid 1 is 1 month ago, pct paid 2 is 2 months ago, etc.
percent_maker(df)
#replaces null and infinite values
df = df.replace({None:0, np.inf:1})
#new X features for modeling...
features = df[['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE','pct_paid_1', 'pct_paid_2', 'pct_paid_3',
'pct_paid_4', 'pct_paid_5', 'pct_paid_6']]
feature_names = list(features.columns.values)
target = df['default payment next month']
target_names = ["Paid", "Default"]
# run randomforest on data we have
RF = RandomForestClassifier()
RF.fit(features, target)
joblib.dump(RF, 'models/credit_model.pkl')
| 38.867647 | 115 | 0.718502 | # old code from iris example
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.datasets import load_iris
# from sklearn.externals import joblib
#import all the needed imports
import numpy as np
import pandas as pd
import os
from sklearn.cross_validation import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.externals import joblib
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import confusion_matrix
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.learning_curve import learning_curve
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
# old code from iris example for reference
# if __name__ == "__main__":
# # Load Iris Data
# iris_data = load_iris()
# features = iris_data.data
# feature_names = iris_data.feature_names
# target = iris_data.target
# target_names = iris_data.target_names
#
# knn = KNeighborsClassifier(n_neighbors=3) # replace with your own ML model here
# knn.fit(features, target)
#
# joblib.dump(knn, 'models/iris_model.pkl')
if __name__ == "__main__":
#load data
df = pd.read_csv('default_of_credit_card_clients.csv')
df = df.dropna()
df = df.drop('ID', axis = 1)
df['default payment next month'] = df['default payment next month'].replace(to_replace=0, value="Paid")
df['default payment next month'] = df['default payment next month'].replace(to_replace=1, value="Default")
df['LIMIT_BAL'] = df['LIMIT_BAL']/1000
#makes the percentage columns I was talking about - pct paid 1 is 1 month ago, pct paid 2 is 2 months ago, etc.
def percent_maker(df):
    """Add pct_paid_1..pct_paid_6 in place: PAY_AMTi / BILL_AMTi."""
    for month in range(1, 7):
        df[f"pct_paid_{month}"] = df[f"PAY_AMT{month}"] / df[f"BILL_AMT{month}"]
percent_maker(df)
# Replace missing values with 0 and infinities (division by a zero bill)
# with 1.  NOTE(review): pandas maps None to NaN in replace() -- confirm
# this also catches NaN ratios produced by 0/0 above.
df = df.replace({None:0, np.inf:1})
# Model features: demographics plus the six repayment ratios.
features = df[['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE','pct_paid_1', 'pct_paid_2', 'pct_paid_3',
'pct_paid_4', 'pct_paid_5', 'pct_paid_6']]
feature_names = list(features.columns.values)
target = df['default payment next month']
target_names = ["Paid", "Default"]
# Fit a default RandomForest on the full data set and persist it.
RF = RandomForestClassifier()
RF.fit(features, target)
joblib.dump(RF, 'models/credit_model.pkl')
| 133 | 0 | 26 |
6c5e0b211f2110134bbac0fcd2258dae8b2f920a | 2,599 | py | Python | Unidad 2/PySide6/act 4/act4.py | Parrilla38/JesusParraAndres--di2122 | fcc9333e1082440e29776691c969e91a5f23c3b9 | [
"Apache-2.0"
] | 1 | 2022-03-02T08:29:53.000Z | 2022-03-02T08:29:53.000Z | Unidad 2/PySide6/act 4/act4.py | Parrilla38/JesusParraAndres--di2122 | fcc9333e1082440e29776691c969e91a5f23c3b9 | [
"Apache-2.0"
] | null | null | null | Unidad 2/PySide6/act 4/act4.py | Parrilla38/JesusParraAndres--di2122 | fcc9333e1082440e29776691c969e91a5f23c3b9 | [
"Apache-2.0"
] | 1 | 2021-12-10T10:24:14.000Z | 2021-12-10T10:24:14.000Z | from PySide6.QtWidgets import QApplication, QMainWindow, QPushButton
from PySide6.QtGui import QScreen
from PySide6.QtCore import QSize
from config import t_max, t_min, t_norm, b_x, b_y
if __name__ == "__main__":
app = QApplication([])
mainWin = MainWindow()
mainWin.show()
app.exec() | 30.22093 | 107 | 0.618699 | from PySide6.QtWidgets import QApplication, QMainWindow, QPushButton
from PySide6.QtGui import QScreen
from PySide6.QtCore import QSize
from config import t_max, t_min, t_norm, b_x, b_y
class MainWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
# Tamaño de la pantalla
self.my_screen = QScreen.availableGeometry(QApplication.primaryScreen())
self.setMaximumSize(t_max)
self.setMinimumSize(t_min)
self.setWindowTitle("Exemple signals-slots 1")
self.pybutton = QPushButton('Maximizar', self)
self.pybutton2 = QPushButton('Normalitza', self)
self.pybutton3 = QPushButton('Minimizar', self)
#Connectem la senyal clicked a la ranura button_pressed
self.pybutton.clicked.connect(self.button_pressedmax)
self.pybutton2.clicked.connect(self.button_pressednormal)
self.pybutton3.clicked.connect(self.button_pressedmin)
self.pybutton.resize(b_x, b_y)
self.pybutton2.resize(b_x, b_y)
self.pybutton3.resize(b_x, b_y)
self.cambia_tam(t_norm)
self.setFixedSize(t_norm)
def cambia_tam(self, tam):
self.move((self.my_screen.width() - tam.width()) / 2, (self.my_screen.height() - tam.height()) / 2)
self.pybutton.move((tam.width() / 5) - (b_x / 2), (tam.height() / 2) - (b_y / 2))
self.pybutton2.move((tam.width() / 2) - (b_x / 2), (tam.height() / 2) - (b_y / 2))
self.pybutton3.move((tam.width() / 1.25) - (b_x / 2), (tam.height() / 2) - (b_y / 2))
def button_pressedmax(self):
self.setWindowTitle("Maximizado")
self.setFixedSize(t_max)
self.cambia_tam(t_max)
self.pybutton.setEnabled(False)
self.pybutton2.setEnabled(True)
self.pybutton3.setEnabled(True)
def button_pressednormal(self):
self.setWindowTitle("Normal")
self.setFixedSize(t_norm)
self.cambia_tam(t_norm)
self.pybutton.setEnabled(True)
self.pybutton2.setEnabled(False)
self.pybutton3.setEnabled(True)
def button_pressedmin(self):
self.setWindowTitle("Minimizado")
self.setFixedSize(t_min)
self.cambia_tam(t_min)
self.pybutton.setEnabled(True)
self.pybutton2.setEnabled(True)
self.pybutton3.setEnabled(False)
if __name__ == "__main__":
app = QApplication([])
mainWin = MainWindow()
mainWin.show()
app.exec() | 2,076 | 9 | 177 |
ec3cddc11f318d8113a7c3f3f3a87cf54e03cb08 | 3,608 | py | Python | smartsquash/helpers.py | max-wittig/smartsquash | 3f68f2f3f3909316017126941ae2d5373593025c | [
"MIT"
] | 2 | 2020-03-09T13:17:29.000Z | 2020-03-10T14:09:25.000Z | smartsquash/helpers.py | max-wittig/smartsquash | 3f68f2f3f3909316017126941ae2d5373593025c | [
"MIT"
] | 1 | 2020-03-11T08:59:03.000Z | 2020-03-11T22:00:16.000Z | smartsquash/helpers.py | max-wittig/smartsquash | 3f68f2f3f3909316017126941ae2d5373593025c | [
"MIT"
] | null | null | null | import git
import git.exc
import os
from pathlib import Path
import subprocess
import sys
import collections
import enum
from typing import List, Dict, Set, Optional
from smartsquash.decorators import memorize_files_changed
from loguru import logger
def retrieve_commits(
repo: git.Repo, target_branch: str, reverse: bool = True
) -> List[git.Commit]:
"""
retrieves commits that are only part of the currently active branch,
and are not in the target branch
- git cherry could be used for this, but GitPython doesn't support it
- Just run raw git command, if this becomes bottleneck
"""
target_commits_sha: List[git.Commit] = [
commit.hexsha
for commit in repo.iter_commits(rev=target_branch)
if len(commit.parents) < 2 # ignore merge commits
]
commits: List[git.Commit] = [
commit
for commit in repo.iter_commits(rev=repo.active_branch)
if commit.hexsha not in target_commits_sha
and len(commit.parents) < 2 # ignore merge commits
]
if reverse:
commits.reverse()
return commits
@memorize_files_changed
| 31.649123 | 86 | 0.681818 | import git
import git.exc
import os
from pathlib import Path
import subprocess
import sys
import collections
import enum
from typing import List, Dict, Set, Optional
from smartsquash.decorators import memorize_files_changed
from loguru import logger
class ErrorMessage(enum.Enum):
    """User-facing fatal-error messages, keyed by failure condition."""

    HEAD_DETACHED = "HEAD is detached. Exiting"
    TARGET_EQUALS_CURRENT = "Target branch equals current active branch. Exiting"
    NOT_A_GIT_REPO = "The target is not a git repository. Exiting"
    PATH_NOT_EXIST = "The path doesn't exist. Exiting"
    TARGET_NOT_EXIST = "The target branch doesn't exist. Exiting"

    def __str__(self):
        # Log the message text itself rather than "ErrorMessage.X".
        return self.value
def fatal_log(message: ErrorMessage):
    """Log *message* as an error and abort the process with exit code 1."""
    logger.error(message)
    sys.exit(1)
def get_repo(repo_path: str, target_branch: str) -> git.Repo:
    """Open the repository at *repo_path* and validate it for rebasing.

    Exits the process (via fatal_log) when the path is missing, is not a
    git repository, *target_branch* does not exist, HEAD is detached, or
    the active branch already is *target_branch*.
    """
    if not Path(repo_path).exists():
        fatal_log(ErrorMessage.PATH_NOT_EXIST)
    try:
        repo: git.Repo = git.Repo(repo_path, search_parent_directories=True)
    except git.exc.InvalidGitRepositoryError:
        fatal_log(ErrorMessage.NOT_A_GIT_REPO)
    try:
        # Bare head lookup -- raises when the branch is absent.
        repo.heads[target_branch]
    except (git.exc.GitCommandError, git.GitCommandError, AttributeError, IndexError):
        fatal_log(ErrorMessage.TARGET_NOT_EXIST)
    if repo.head.is_detached:
        fatal_log(ErrorMessage.HEAD_DETACHED)
    if repo.active_branch.name == target_branch:
        fatal_log(ErrorMessage.TARGET_EQUALS_CURRENT)
    return repo
def retrieve_commits(
    repo: git.Repo, target_branch: str, reverse: bool = True
) -> List[git.Commit]:
    """Return the non-merge commits unique to the active branch.

    Collects every non-merge commit reachable from the active branch whose
    sha does not appear on *target_branch*.  With *reverse* (the default)
    the result is ordered oldest-first.

    - git cherry could be used for this, but GitPython doesn't support it
    - Just run raw git command, if this becomes bottleneck
    """
    # A set gives O(1) membership tests below (the original list made the
    # filter O(n*m)).  Note: these are hex sha strings, not Commit objects
    # -- the original List[git.Commit] annotation was wrong.
    target_shas: Set[str] = {
        commit.hexsha
        for commit in repo.iter_commits(rev=target_branch)
        if len(commit.parents) < 2  # ignore merge commits
    }
    commits: List[git.Commit] = [
        commit
        for commit in repo.iter_commits(rev=repo.active_branch)
        if commit.hexsha not in target_shas
        and len(commit.parents) < 2  # ignore merge commits
    ]
    if reverse:
        commits.reverse()
    return commits
@memorize_files_changed
def files_changed_by_commit(working_dir: str, commit: str) -> List[str]:
    """Return the file paths touched by *commit*, via `git diff-tree` run in *working_dir*."""
    lines = (
        subprocess.check_output(
            ["git", "diff-tree", "--no-commit-id", "--name-only", "-r", commit],
            cwd=working_dir,
        )
        .decode()
        .splitlines()
    )
    # Drop empty lines. (The previous version shadowed the list with its own
    # loop variable: "[output for output in output if output]".)
    return [line for line in lines if line]
def get_commits_changed_files(commits: List[git.Commit]) -> Dict[str, Set[str]]:
    """Map each commit sha to the set of file paths it touched.

    Commits that touched no files get no entry (defaultdict keys are only
    created when a file is added).
    """
    changed: Dict[str, Set[str]] = collections.defaultdict(set)
    for commit in commits:
        sha = commit.hexsha
        for path in files_changed_by_commit(commit.repo.working_dir, sha):
            changed[sha].add(path)
    return changed
def run_rebase(
    repo: git.Repo,
    target_branch: str,
    sequence_editor: str,
    dry: bool = False,
    autosquash: bool = True,
):
    """Run `git rebase -i <target_branch>` with *sequence_editor* as GIT_SEQUENCE_EDITOR.

    dry: only log the command that would run, then exit(0).
    autosquash: prepend --autosquash to the rebase arguments.
    On failure the rebase is aborted and an error is logged.
    """
    os.environ["GIT_SEQUENCE_EDITOR"] = sequence_editor
    args: List[str] = ["-i", target_branch]
    if autosquash:
        args.insert(0, "--autosquash")
    if dry:
        # Bug fix: the message previously lacked the space after "rebase",
        # printing e.g. "git rebase--autosquash -i main".
        logger.log("DRY", f"Would run: 'git rebase {' '.join(args)}'")
        sys.exit(0)
    try:
        repo.git.rebase(args)
        print("Rebase done")
    except git.CommandError:
        # Leave the working tree clean for a manual retry.
        repo.git.rebase("--abort")
        logger.error("Rebase failed and aborted. You'll need to squash manually")
| 1,987 | 354 | 137 |
2f2449777fb9ba94f5d49b16485c65e08cefa8b4 | 29,259 | py | Python | model.py | 2AiBAIT/StoneRecog | 9d840eb09e05997ed130c0989334f77b1a872e2b | [
"MIT"
] | null | null | null | model.py | 2AiBAIT/StoneRecog | 9d840eb09e05997ed130c0989334f77b1a872e2b | [
"MIT"
] | null | null | null | model.py | 2AiBAIT/StoneRecog | 9d840eb09e05997ed130c0989334f77b1a872e2b | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow.python.keras.layers import Input
# gpus = tf.config.list_physical_devices('GPU')
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
| 52.15508 | 135 | 0.582146 | import tensorflow as tf
from tensorflow.python.keras.layers import Input
# gpus = tf.config.list_physical_devices('GPU')
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
class jbdm_v0(object):
    """Small from-scratch CNN classifier (4 conv/pool stages + dense head)."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), pretrained_weights=None, lr=1e-3):
        """Build and compile the CNN.

        num_class: softmax output width. input_size: input image shape.
        pretrained_weights: optional weights file to load. lr: Adam rate.
        Returns the compiled tf.keras model.
        """
        model = tf.keras.models.Sequential([
            tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_size),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
            tf.keras.layers.MaxPooling2D(2, 2),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.Dense(num_class, activation='softmax')
        ])
        model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        print("Model summary")
        print(model.summary())
        if pretrained_weights:
            model.load_weights(pretrained_weights)
        return model
class SR_MobileNetV2():  # jbdm_v2_32():
    """Transfer-learning classifier on an ImageNet-pretrained MobileNetV2 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.mobilenet_v2.MobileNetV2(weights='imagenet',
                                                                   include_top=False,
                                                                   input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_MobileNetV3Small():  # jbdm_v2_5():
    """Transfer-learning classifier on an ImageNet-pretrained MobileNetV3Small backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.MobileNetV3Small(weights='imagenet',
                                                           include_top=False,
                                                           input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_MobileNetV3Large():  # jbdm_v2_7():
    """Transfer-learning classifier on an ImageNet-pretrained MobileNetV3Large backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.MobileNetV3Large(weights='imagenet',
                                                           include_top=False,
                                                           input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_InceptionResNetV2():  # jbdm_v3():
    """Transfer-learning classifier on an ImageNet-pretrained InceptionResNetV2 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.inception_resnet_v2.InceptionResNetV2(weights='imagenet',
                                                                                include_top=False,
                                                                                input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_DenseNet201():  # jbdm_v4(): # DenseNet201
    """Transfer-learning classifier on an ImageNet-pretrained DenseNet201 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.densenet.DenseNet201(weights='imagenet',
                                                               include_top=False,
                                                               input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_DenseNet169():  # jbdm_v4_1(): # DenseNet169
    """Transfer-learning classifier on an ImageNet-pretrained DenseNet169 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.densenet.DenseNet169(weights='imagenet',
                                                               include_top=False,
                                                               input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_DenseNet121():  # jbdm_v4_2(): # DenseNet121
    """Transfer-learning classifier on a DenseNet121 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        # Skip the ImageNet download when a full-model checkpoint will be
        # loaded below anyway.
        if pretrained_weights is None:
            weights = 'imagenet'
        else:
            weights = None
        baseModel = tf.keras.applications.densenet.DenseNet121(weights=weights,
                                                               include_top=False,
                                                               input_tensor=Input(shape=input_size))
        print("Base Model summary")
        print(baseModel.summary())
        if not retrainAll:
            baseModel.trainable = False
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_NASNetMobile():  # jbdm_v5(): # NASNetMobile
    """Transfer-learning classifier on an ImageNet-pretrained NASNetMobile backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.nasnet.NASNetMobile(weights='imagenet',
                                                              include_top=False,
                                                              input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_NASNetLarge():  # jbdm_v5_5(): # NASNetLarge
    """Transfer-learning classifier on an ImageNet-pretrained NASNetLarge backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.nasnet.NASNetLarge(weights='imagenet',
                                                             include_top=False,
                                                             input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_EfficientNetB0():  # jbdm_v6(): # EfficientNetB0
    """Transfer-learning classifier on an ImageNet-pretrained EfficientNetB0 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        # NOTE(review): this first full-model build only prints a summary and
        # triggers an extra weights download — consider removing it.
        baseModel = tf.keras.applications.efficientnet.EfficientNetB0(weights='imagenet')
        print("Base Model summary")
        print(baseModel.summary())
        baseModel = tf.keras.applications.efficientnet.EfficientNetB0(weights='imagenet',
                                                                      include_top=False,
                                                                      input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_EfficientNetB7():  # jbdm_v6_7(): # EfficientNetB7
    """Transfer-learning classifier on an ImageNet-pretrained EfficientNetB7 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.efficientnet.EfficientNetB7(weights='imagenet',
                                                                      include_top=False,
                                                                      input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_ResNet152V2():  #
    """Transfer-learning classifier on an ImageNet-pretrained ResNet152V2 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.ResNet152V2(weights='imagenet',
                                                      include_top=False,
                                                      input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_InceptionV3():  #
    """Transfer-learning classifier on an ImageNet-pretrained InceptionV3 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.inception_v3.InceptionV3(weights='imagenet',
                                                                   include_top=False,
                                                                   input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_VGG19():  #
    """Transfer-learning classifier on an ImageNet-pretrained VGG19 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.vgg19.VGG19(weights='imagenet',
                                                      include_top=False,
                                                      input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
class SR_VGG16():  #
    """Transfer-learning classifier on an ImageNet-pretrained VGG16 backbone."""

    @staticmethod  # build is always invoked on the class, never on an instance
    def build(num_class, input_size=(128, 128, 3), classifierLayer=512, dropout=0, pretrained_weights=None, lr=1e-3, retrainAll=False):
        """Build and compile the model (backbone frozen unless retrainAll)."""
        baseModel = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                                      include_top=False,
                                                      input_tensor=Input(shape=input_size))
        print("Base Model no top summary")
        print(baseModel.summary())
        baseModel.trainable = bool(retrainAll)
        base_output = baseModel.output
        base_output = tf.keras.layers.GlobalAveragePooling2D()(base_output)
        if classifierLayer > 0:
            if dropout > 0:
                base_output = tf.keras.layers.Dropout(dropout)(base_output)
            base_output = tf.keras.layers.Dense(classifierLayer, activation="relu")(base_output)
        new_output = tf.keras.layers.Dense(num_class, activation="softmax")(base_output)
        new_model = tf.keras.models.Model(inputs=baseModel.inputs, outputs=new_output)
        new_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr),
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])
        print("Model summary")
        print(new_model.summary())
        if pretrained_weights:
            new_model.load_weights(pretrained_weights)
        return new_model
| 27,929 | 331 | 784 |
62af5d826924a1995d33969f91b28ac721cf4f20 | 315 | py | Python | src/IceRayPy/core/material/instruction/label/coord3d/__init__.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 2 | 2020-09-04T12:27:15.000Z | 2022-01-17T14:49:40.000Z | src/IceRayPy/core/material/instruction/label/coord3d/__init__.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | null | null | null | src/IceRayPy/core/material/instruction/label/coord3d/__init__.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 1 | 2020-09-04T12:27:52.000Z | 2020-09-04T12:27:52.000Z | print( '<' + __name__ + ' name=\'' + __file__ + '\'>' )
import IceRayPy.core.material.instruction.label.coord3d.const
import IceRayPy.core.material.instruction.label.coord3d.dynamic
import IceRayPy.core.material.instruction.label.coord3d.temp
print( '</' + __name__ + ' name=\'' + __file__ + '\'>' )
| 35 | 64 | 0.67619 | print( '<' + __name__ + ' name=\'' + __file__ + '\'>' )
import IceRayPy.core.material.instruction.label.coord3d.const
import IceRayPy.core.material.instruction.label.coord3d.dynamic
import IceRayPy.core.material.instruction.label.coord3d.temp
print( '</' + __name__ + ' name=\'' + __file__ + '\'>' )
| 0 | 0 | 0 |
6a202ec48b62e2ef418df09100899c4db565b4c5 | 3,330 | py | Python | constraintModuleTP.py | eigeneddie/multibodydynamics | ed8bb9bbfb3ba31a3744aab51a48bae68ad9167c | [
"MIT"
] | 1 | 2022-03-17T10:56:42.000Z | 2022-03-17T10:56:42.000Z | constraintModuleTP.py | eigeneddie/multibodydynamics | ed8bb9bbfb3ba31a3744aab51a48bae68ad9167c | [
"MIT"
] | null | null | null | constraintModuleTP.py | eigeneddie/multibodydynamics | ed8bb9bbfb3ba31a3744aab51a48bae68ad9167c | [
"MIT"
] | 1 | 2022-03-29T13:40:46.000Z | 2022-03-29T13:40:46.000Z | #equations for constraints
import numpy as np
from calcModuleTP import ATransformMatrixTHETA as A_Theta, link2index
from calcModuleTP import ATransformMatrix as A_i | 34.329897 | 78 | 0.64024 | #equations for constraints
import numpy as np
from calcModuleTP import ATransformMatrixTHETA as A_Theta, link2index
from calcModuleTP import ATransformMatrix as A_i
def constraintEquation(r1A, r1B, r2B, r2C, r3C):
    """Assemble the 6x1 constraint vector for the three pin joints A, B, C.

    Rows 0-1: ground pin at A; rows 2-3: pin B between links 1 and 2;
    rows 4-5: pin C between links 2 and 3.
    """
    C = np.zeros((6, 1))
    # Ground pin at A: point A of link 1 must stay at the origin.
    pinA = -r1A
    for row in range(np.size(pinA)):
        C[row] = pinA[row]
    # Pin joint B: links 1 and 2 share point B.
    pinB = revolutJoint(r1B, r2B)
    for row in range(np.size(pinB)):
        C[row + 2] = pinB[row]
    # Pin joint C: links 2 and 3 share point C.
    pinC = revolutJoint(r2C, r3C)
    for row in range(np.size(pinC)):
        C[row + 4] = pinC[row]
    return C
def jacobianMatrix(qi, u_bar_1A, u_bar_1B, u_bar_2B, u_bar_2C, u_bar_3C):
    """Build the 6xN constraint Jacobian Cq for the three-pin linkage.

    qi: generalized coordinates (x, y, theta per link); u_bar_*: joint-point
    positions in each link's local frame. Returns the full Jacobian plus the
    columns split into dependent (x, y) and independent (theta) coordinates.
    Assumes qi is ordered [x1, y1, th1, x2, y2, th2, x3, y3, th3] — the
    slices below hard-code that layout.
    """
    genCoor = np.size(qi) # number of generalized coordinates
    constEq = 6 # number of constraint equations
    jacobianMatrixCq = np.zeros((constEq, genCoor))
    identity2x2 = np.identity(2)
    # row 1-2: ground pin at A (only link 1 columns are non-zero)
    Cq12 = np.dot(A_Theta(qi[link2index(1,"theta")]), u_bar_1A)
    jacobianMatrixCq[0:2,0:2] = -identity2x2
    jacobianMatrixCq[0:2,2:3] = -Cq12
    # row 3-4 (r1A = r2A)
    Cq34_link1 = np.dot(A_Theta(qi[link2index(1,"theta")]), u_bar_1B)
    Cq34_link2 = np.dot(A_Theta(qi[link2index(2,"theta")]), u_bar_2B)
    jacobianMatrixCq[2:4,0:2] = identity2x2
    jacobianMatrixCq[2:4,2:3] = Cq34_link1
    jacobianMatrixCq[2:4,3:5] = -identity2x2
    jacobianMatrixCq[2:4,5:6] = -Cq34_link2
    # row 5-6 (r2C = r3C)
    Cq56_link2 = np.dot(A_Theta(qi[link2index(2,"theta")]), u_bar_2C)
    Cq56_link3 = np.dot(A_Theta(qi[link2index(3,"theta")]), u_bar_3C)
    jacobianMatrixCq[4:6,3:5] = identity2x2
    jacobianMatrixCq[4:6,5:6] = Cq56_link2
    jacobianMatrixCq[4:6,6:8] = -identity2x2
    jacobianMatrixCq[4:6,8:9] = -Cq56_link3
    # SLICING
    # a. jacobian dependent: the translational (x, y) columns of each link
    jacobian_dependent = np.concatenate((jacobianMatrixCq[:,0:2],
                                        jacobianMatrixCq[:,3:5],
                                        jacobianMatrixCq[:,6:8]), axis = 1)
    # b. jacobian independent: the rotational (theta) columns of each link
    jacobian_independent = np.concatenate((jacobianMatrixCq[:,2:3],
                                        jacobianMatrixCq[:,5:6],
                                        jacobianMatrixCq[:,8:9]), axis = 1)
    return jacobianMatrixCq, jacobian_dependent, jacobian_independent
def positionAnalysis(constraintVector, jacobianMatrix, qi):
    """One Newton-Raphson position step: solve J * dq = -C and update qi.

    Returns the updated qi and the norm of the correction, which the
    caller uses as the convergence criterion.
    """
    # Solve the linear system directly instead of forming the explicit
    # inverse: cheaper and better conditioned numerically.
    delta_qi = np.linalg.solve(jacobianMatrix, -constraintVector)
    delta_qi_norm = np.linalg.norm(delta_qi)
    qi = qi + delta_qi
    return qi, delta_qi_norm
def QdCalc1(qi, qiDot, u_bar_iP, i):
    """Quadratic-velocity vector Qd for a joint point P on link i."""
    theta_idx = link2index(i, "theta")
    omega_sq = np.square(float(qiDot[theta_idx]))  # theta_dot squared
    return omega_sq * np.dot(A_i(qi[theta_idx]), u_bar_iP)
def QdCalc2(qi, qiDot, u_bar_iP, u_bar_jP, i, j):
    """Quadratic-velocity vector Qd for a joint connecting links i and j."""
    theta_i = link2index(i, "theta")
    theta_j = link2index(j, "theta")
    term_i = np.square(float(qiDot[theta_i])) * np.dot(A_i(qi[theta_i]), u_bar_iP)
    term_j = np.square(float(qiDot[theta_j])) * np.dot(A_i(qi[theta_j]), u_bar_jP)
    return term_i - term_j
def revolutJoint(riP, riJ):
    """Revolute-joint constraint residual: both bodies must share point P."""
    return riP - riJ
d2f81c1e04e9d7454c8ea37a8e05f85b708b6431 | 8,346 | py | Python | Tic_tac_toe.py | yarinl3/Tic-Tac-Toe | c915643f09162dafb48fb9cca2a42beeb5e5734c | [
"Apache-2.0"
] | 1 | 2021-05-21T12:46:30.000Z | 2021-05-21T12:46:30.000Z | Tic_tac_toe.py | yarinl3/Tic-Tac-Toe | c915643f09162dafb48fb9cca2a42beeb5e5734c | [
"Apache-2.0"
] | null | null | null | Tic_tac_toe.py | yarinl3/Tic-Tac-Toe | c915643f09162dafb48fb9cca2a42beeb5e5734c | [
"Apache-2.0"
] | 3 | 2021-05-29T06:18:02.000Z | 2021-06-20T16:44:47.000Z | import tkinter as tk
import random
board_layout = []     # the nine tk.Label grid cells, row-major
utilities = {}        # shared widgets, e.g. 'root', 'turn_label', 'counter'
turn = 'X'            # whose mark goes next ('X' or 'O')
radio_button = '0'    # game mode selector value (set elsewhere in the file)
winner = ''           # 'X'/'O' once somebody has won, '' while playing
count = 0             # number of moves played so far
def minimax(board, min_max):
    """Minimax over all continuations of *board*.

    min_max True = computer ('O', maximizing), False = player ('X',
    minimizing). Returns (score, index): score is 1/-1/0 for O-win /
    X-win / draw, and index is the optimal next move, or None when the
    position is already terminal.
    """
    result = win_draw(board)
    if result != 2:
        return result, None
    maximum = -1
    minimum = 1
    best_index = (0, 0)
    for index in empty_cells(board):
        new_board = [i.copy() for i in board]
        # puts in the board X or O according the turn
        new_board[index[0]][index[1]] = 'O' if min_max is True else 'X'
        # the recursive step
        result = minimax(new_board, not min_max)[0]
        # computer turn
        if min_max is True:
            # improvement of the algorithm for saving unnecessary steps
            if result == 1:
                return 1, index
            # Finds the maximum result out of the possible ways and its index (one step from the current board)
            if maximum <= result:
                maximum = result
                best_index = index
        # player turn
        else:
            # improvement of the algorithm for saving unnecessary steps
            if result == -1:
                return -1, index
            # Finds the minimum result out of the possible ways and its index (one step from the current board)
            if minimum >= result:
                minimum = result
                best_index = index
    # returns the result and the optimal index
    return (maximum, best_index) if min_max is True else (minimum, best_index)
def labels_bind():
    """Enable left-click handling on every grid cell."""
    for idx, cell in enumerate(board_layout):
        # idx is captured via the default argument so every lambda keeps its own index
        cell.bind("<Button-1>", func=lambda event, item=idx: player_step(board_layout[item]))
def labels_unbind():
    """Disable clicking: rebind every grid cell to a no-op handler."""
    for cell in board_layout:
        cell.bind("<Button-1>", func=lambda event: event)
# Script entry point — main() is presumably defined elsewhere in this file.
if __name__ == "__main__":
    main()
| 34.345679 | 114 | 0.59178 | import tkinter as tk
import random
board_layout = []     # the nine tk.Label grid cells, row-major
utilities = {}        # shared widgets: 'root', 'turn_label', 'counter'
turn = 'X'            # whose mark goes next ('X' or 'O')
radio_button = '0'    # mode: '0' two players, '1' random AI, '2' minimax AI
winner = ''           # 'X'/'O' once somebody has won, '' while playing
count = 0             # number of moves played so far
def comp_step():
    """Schedule the computer's reply move after a short delay.

    radio_button '1' plays a random empty cell; '2' plays the minimax
    move (only when it is actually O's turn). The board is disabled
    until freeze_game re-enables it.
    """
    global turn
    current_board = get_matrix_board()
    # random step
    if radio_button == '1':
        labels_unbind()
        # delay between player step and computer step
        utilities['root'].after(500, lambda: freeze_game(random.choice(empty_cells(current_board))))
    # smart step (minimax algorithm)
    if radio_button == '2':
        # checks if it's the computer's turn
        if turn == 'O':
            labels_unbind()
            # gets the optimal index for computer step
            index = minimax(current_board, True)[1]
            # delay between player step and computer step
            utilities['root'].after(500, lambda: freeze_game(index))
def get_matrix_board():
    """Snapshot the visual 3x3 grid into a row-major matrix of label texts."""
    rows = [[], [], []]
    for idx, cell in enumerate(board_layout):
        rows[idx // 3].append(cell['text'])
    return rows
def empty_cells(board):
    """Return the (row, col) indexes of every empty cell, row-major order."""
    free = []
    for row in range(3):
        for column in range(3):
            if board[row][column] == '':
                free.append((row, column))
    return free
def minimax(board, min_max):
    """Minimax over all continuations of *board*.

    min_max True = computer ('O', maximizing), False = player ('X',
    minimizing). Returns (score, index): score is 1/-1/0 for O-win /
    X-win / draw, and index is the optimal next move, or None when the
    position is already terminal.
    """
    result = win_draw(board)
    if result != 2:
        return result, None
    maximum = -1
    minimum = 1
    best_index = (0, 0)
    for index in empty_cells(board):
        new_board = [i.copy() for i in board]
        # puts in the board X or O according the turn
        new_board[index[0]][index[1]] = 'O' if min_max is True else 'X'
        # the recursive step
        result = minimax(new_board, not min_max)[0]
        # computer turn
        if min_max is True:
            # improvement of the algorithm for saving unnecessary steps
            if result == 1:
                return 1, index
            # Finds the maximum result out of the possible ways and its index (one step from the current board)
            if maximum <= result:
                maximum = result
                best_index = index
        # player turn
        else:
            # improvement of the algorithm for saving unnecessary steps
            if result == -1:
                return -1, index
            # Finds the minimum result out of the possible ways and its index (one step from the current board)
            if minimum >= result:
                minimum = result
                best_index = index
    # returns the result and the optimal index
    return (maximum, best_index) if min_max is True else (minimum, best_index)
def win_draw(board):
    """Score a 3x3 board: 1 = O wins, -1 = X wins, 0 = draw, 2 = game goes on."""
    # Collect every line in the same order the original scan visits them:
    # row i, column i, then the two diagonals.
    lines = []
    for k in range(3):
        lines.append((board[k][0], board[k][1], board[k][2]))  # row k
        lines.append((board[0][k], board[1][k], board[2][k]))  # column k
    lines.append((board[0][0], board[1][1], board[2][2]))      # main diagonal
    lines.append((board[2][0], board[1][1], board[0][2]))      # anti-diagonal
    for a, b, c in lines:
        if a == b == c and a != '':
            return 1 if a == 'O' else -1
    # No winner: a fully occupied board is a draw, otherwise play continues.
    if all(cell != '' for row in board for cell in row):
        return 0
    return 2
def freeze_game(index):
    """Apply the computer's move at (row, col) ``index``, then re-enable input.

    Called via root.after so the board stays frozen during the AI "thinking"
    delay (labels were unbound by the caller beforehand).
    """
    # changes the pressed label to 'O' (flat index = row * 3 + col)
    board_layout[index[0] * 3 + index[1]].config(text='O')
    labels_bind()
    check_win_draw()
def check_win_draw():
    """Evaluate the board, update the status widgets, and advance the turn.

    Returns win_draw's code (1/-1 winner, 0 draw, 2 game continues), or
    None implicitly when a winner was already declared earlier.
    """
    global winner
    global turn
    global count
    if winner == '':
        current_board = get_matrix_board()
        result = win_draw(current_board)
        if result in [1, -1]:
            # The player who just moved wins -- turn has not toggled yet.
            winner = turn
            utilities['turn_label']['text'] = f'The winner is {winner}'
            labels_unbind()
        if result == 0:
            utilities['turn_label']['text'] = 'Draw'
        if result == 2:
            # Game continues: switch player and bump the move counter.
            turn = 'O' if turn == 'X' else 'X'
            utilities['turn_label']['text'] = f'Turn {turn}'
            count += 1
            utilities['counter']['text'] = f'Moves: {count}'
        return result
def player_step(label):
    """Handle a human click: mark the clicked cell with the current symbol.

    Clicks on already-filled cells are ignored.  After a valid move, if
    the game is still running (result == 2) and an AI mode is selected
    (radio_button != '0'), the computer replies.
    """
    if label['text'] == '':
        label.config(text=turn)
        # Checks if you are playing against the computer and checks if there is no winner or draw
        if check_win_draw() == 2 and radio_button != '0':
            comp_step()
def labels_bind():
    """Enable the board: a left-click on any cell invokes player_step for it."""
    for item, label in enumerate(board_layout):
        # Default argument pins the index at bind time (late-binding fix).
        label.bind("<Button-1>", func=lambda event, item=item: player_step(board_layout[item]))
def labels_unbind():
    """Disable the board by rebinding every cell's click to a no-op handler."""
    for label in board_layout:
        label.bind("<Button-1>", func=lambda event: event)
def grid_all():
    """Lay out the 3x3 board labels and the utility widgets with grid()."""
    for i in range(len(board_layout)):
        board_layout[i].grid(row=(i // 3), column=(i % 3))
    utilities['turn_label'].grid(row=0)
    utilities['counter'].grid(row=1)
    utilities['restart'].grid(row=2)
    # Sets the radio buttons (keys starting with 'pv'); rows follow the
    # dict's insertion order (pvp, pvc, pvsc) -- guaranteed on Python 3.7+.
    index = 0
    for key in utilities:
        if key.startswith('pv'):
            utilities[key].grid(row=index, pady=5, padx=5, sticky='W')
            index += 1
def new_game():
    """Clear the board and reset all game state for a fresh round."""
    global winner, turn, count, board_layout, utilities
    winner, turn, count = '', 'X', 0
    labels_bind()
    for label in board_layout:
        label['text'] = ''
    utilities['turn_label']['text'] = f'Turn {turn}'
    utilities['counter']['text'] = f'Moves: {count}'
def change_player(radio):
    """Switch game mode from the radio selector and restart the game.

    radio.get() yields '0' (player vs player), '1' (random AI) or
    '2' (minimax AI); the value is compared as a string elsewhere.
    """
    global radio_button
    radio_button = radio.get()
    new_game()
def main():
    """Construct the Tk interface and enter the event loop.

    Builds the 3x3 board labels, the status/restart widgets and the
    game-mode radio buttons, publishes them through the module globals
    board_layout / utilities, then starts tkinter's mainloop.
    """
    global count
    global board_layout
    global utilities
    root = tk.Tk()
    root.wm_title('Tic Tac Toe')
    frame = tk.Frame(root)
    FONT, FONT2, FONT3 = ('Arial', 70), ('Arial', 20), ('Arial', 16)
    # board layout view
    top_left = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    top_middle = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    top_right = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    middle_left = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    middle_middle = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    middle_right = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    bottom_left = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    bottom_middle = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    bottom_right = tk.Label(frame, text='', font=FONT, width=2, height=1, relief='solid')
    # utility items
    frame2 = tk.Frame(root)
    turn_label = tk.Label(frame2, text=f'Turn {turn}', font=FONT2)
    turn_count = tk.Label(frame2, text=f"Moves: {count}", font=FONT2)
    restart = tk.Button(frame2, text='New game', command=lambda: new_game(), font=FONT3)
    # StringVar: .get() returns '0'/'1'/'2' as strings, matching the
    # string comparisons against radio_button elsewhere in the module.
    radio = tk.StringVar()
    radio.set(0)
    frame3 = tk.Frame(root)
    player_vs_player = tk.Radiobutton(frame3, text='Player vs Player', command=(lambda: change_player(radio)),
                                      variable=radio, value=0)
    player_vs_computer = tk.Radiobutton(frame3, text='Player vs Computer', command=(lambda: change_player(radio)),
                                        variable=radio, value=1)
    player_vs_smart_computer = tk.Radiobutton(frame3, text='Player vs Smart Computer',
                                              command=(lambda: change_player(radio)), variable=radio, value=2)
    utilities = {
        'turn_label': turn_label,
        'restart': restart,
        'root': root,
        'pvp': player_vs_player,
        'pvc': player_vs_computer,
        'pvsc': player_vs_smart_computer,
        'counter': turn_count
    }
    board_layout = [top_left, top_middle, top_right, middle_left, middle_middle, middle_right, bottom_left,
                    bottom_middle, bottom_right]
    grid_all()
    labels_bind()
    frame.pack()
    frame2.pack()
    frame3.pack(anchor='w')
    root.mainloop()
if __name__ == "__main__":
main()
| 6,090 | 0 | 253 |
4e35865abe13c605a53e965c2c12f57b482f5ab0 | 333 | py | Python | safe_il/__init__.py | Justin-Yuan/safe_imitation | 7528e9649f9921ee70a1386bd0c00b1e462717fd | [
"MIT"
] | null | null | null | safe_il/__init__.py | Justin-Yuan/safe_imitation | 7528e9649f9921ee70a1386bd0c00b1e462717fd | [
"MIT"
] | null | null | null | safe_il/__init__.py | Justin-Yuan/safe_imitation | 7528e9649f9921ee70a1386bd0c00b1e462717fd | [
"MIT"
] | null | null | null | from gym.envs.registration import register
# Register the custom environments with Gym so callers can instantiate
# them via gym.make('SimpleNavigation-v0') / gym.make('BoneDrilling2D-v0').
register(
    id='SimpleNavigation-v0',
    entry_point='safe_il.envs:SimpleNavigation',
    max_episode_steps=10000,
    reward_threshold=200,
)
register(
    id='BoneDrilling2D-v0',
    entry_point='safe_il.envs:BoneDrilling2D',
    max_episode_steps=10000,
    reward_threshold=200,
)
| 20.8125 | 48 | 0.741742 | from gym.envs.registration import register
register(
id='SimpleNavigation-v0',
entry_point='safe_il.envs:SimpleNavigation',
max_episode_steps=10000,
reward_threshold=200,
)
register(
id='BoneDrilling2D-v0',
entry_point='safe_il.envs:BoneDrilling2D',
max_episode_steps=10000,
reward_threshold=200,
)
| 0 | 0 | 0 |
220163218b2ab6db5985edc6eb5d2d6dc8d5ed96 | 500 | py | Python | sample_contract/tests/test_integration.py | startled-cat/two-factor-over-blockchain | 2621eef2449b58b728fbb2b0b1ca910be397acf5 | [
"MIT"
] | 1 | 2022-03-31T16:35:05.000Z | 2022-03-31T16:35:05.000Z | sample_contract/tests/test_integration.py | startled-cat/two-factor-over-blockchain | 2621eef2449b58b728fbb2b0b1ca910be397acf5 | [
"MIT"
] | null | null | null | sample_contract/tests/test_integration.py | startled-cat/two-factor-over-blockchain | 2621eef2449b58b728fbb2b0b1ca910be397acf5 | [
"MIT"
] | null | null | null | from brownie import network, config, AuthenticatorProvider
from scripts.utils import get_account, is_network_local
from scripts.deploy import deploy
import pytest
| 31.25 | 65 | 0.748 | from brownie import network, config, AuthenticatorProvider
from scripts.utils import get_account, is_network_local
from scripts.deploy import deploy
import pytest
def test_generate_otp():
    """Integration test: generateOtp leaves a non-empty OTP on-chain.

    Skipped on local networks; relies on an AuthenticatorProvider
    contract already deployed (brownie's most recent deployment).
    """
    if is_network_local():
        pytest.skip("Only for integration testing")
    contract = AuthenticatorProvider[-1]
    account = get_account(0)
    # NOTE(review): tx is unused -- kept only to await the transaction.
    tx = contract.generateOtp(account.address, {"from": account})
    (password, generatedAt) = contract.otp(account.address)
    assert len(str(password)) > 0
| 312 | 0 | 23 |
21a7cd2c33be4cd790ab94e03d386d2c6df351d8 | 1,315 | py | Python | P0019-Counting-Sundays/counting_sundays.py | kabhari/Project_Euler | e9aba54ae1e03aaf5311fdc615e85cf4c91b25e3 | [
"MIT"
] | null | null | null | P0019-Counting-Sundays/counting_sundays.py | kabhari/Project_Euler | e9aba54ae1e03aaf5311fdc615e85cf4c91b25e3 | [
"MIT"
] | null | null | null | P0019-Counting-Sundays/counting_sundays.py | kabhari/Project_Euler | e9aba54ae1e03aaf5311fdc615e85cf4c91b25e3 | [
"MIT"
] | null | null | null | '''
Q:You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
'''
final answer: 171
'''
# init constant
months = [
31, # Jan
-1, # Feb - this will be filled later depending on the year
31, # Mar
30, # Apr
31, # May
30, # Jun
31, # Jul
31, # Aug
30, # Sep
31, # Oct
30, # Nov
31, # Dec
]
# compute
if __name__ == '__main__':
print(" " + str(compute()) + " ")
| 23.482143 | 105 | 0.601521 | '''
Q:You are given the following information, but you may prefer to do some research for yourself.
1 Jan 1900 was a Monday.
Thirty days has September,
April, June and November.
All the rest have thirty-one,
Saving February alone,
Which has twenty-eight, rain or shine.
And on leap years, twenty-nine.
A leap year occurs on any year evenly divisible by 4, but not on a century unless it is divisible by 400.
How many Sundays fell on the first of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?
'''
'''
final answer: 171
'''
# init constant
months = [
31, # Jan
-1, # Feb - this will be filled later depending on the year
31, # Mar
30, # Apr
31, # May
30, # Jun
31, # Jul
31, # Aug
30, # Sep
31, # Oct
30, # Nov
31, # Dec
]
def compute_sundays(start, end):
    """Count the Sundays falling on the first of a month in [start, end].

    Fixes two defects in the original hand-rolled calendar:
    * the leap-year test ``y % 4 == 0 and y % 400 == 0`` treated every
      non-multiple of 400 (e.g. 1904) as a common year; the Gregorian
      rule is "divisible by 4, and for century years also by 400";
    * the running weekday offset started at 0 for January of *start*
      even though the known anchor (1 Jan 1900 = Monday) is not aligned
      with it, so the check counted the wrong weekday.

    Delegating to datetime.date sidesteps both issues; for the puzzle
    range (1901, 2000) this returns the documented answer, 171.
    """
    from datetime import date  # local import: module deps stay unchanged
    return sum(
        1
        for year in range(start, end + 1)
        for month in range(1, 13)
        # weekday(): Monday == 0 ... Sunday == 6
        if date(year, month, 1).weekday() == 6
    )
# compute
def compute():
    """Return the puzzle answer: Sundays on the 1st of a month, 1901-2000."""
    return compute_sundays(1901, 2000)
if __name__ == '__main__':
print(" " + str(compute()) + " ")
| 398 | 0 | 45 |
27013a1d9d0d07be5a3d6e567dc9f3e425349f7b | 328 | py | Python | single-audit/resolve_findings/admin.py | Jkrzy/federal-grant-reporting | c6d5d489a7e9b6013031cbe0a426170881dccd42 | [
"CC0-1.0"
] | null | null | null | single-audit/resolve_findings/admin.py | Jkrzy/federal-grant-reporting | c6d5d489a7e9b6013031cbe0a426170881dccd42 | [
"CC0-1.0"
] | null | null | null | single-audit/resolve_findings/admin.py | Jkrzy/federal-grant-reporting | c6d5d489a7e9b6013031cbe0a426170881dccd42 | [
"CC0-1.0"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Finding, Agency, Grant, Grantee, User, Comment
# Register the custom User model with Django's stock UserAdmin options.
admin.site.register(User, UserAdmin)
# Expose the grant-resolution domain models with default ModelAdmin views.
admin.site.register(Agency)
admin.site.register(Finding)
admin.site.register(Grant)
admin.site.register(Grantee)
admin.site.register(Comment)
| 29.818182 | 66 | 0.817073 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Finding, Agency, Grant, Grantee, User, Comment
admin.site.register(User, UserAdmin)
admin.site.register(Agency)
admin.site.register(Finding)
admin.site.register(Grant)
admin.site.register(Grantee)
admin.site.register(Comment)
| 0 | 0 | 0 |
94893ae0692c8b62b53846e1c462f83d7e410b1b | 4,622 | py | Python | nearest_correlation_unittests.py | mikecroucher/nearest_correlation | c04e9b61552d9d2c118c95eb711088ef173ae80f | [
"BSD-3-Clause"
] | 27 | 2015-08-11T23:10:13.000Z | 2022-02-23T15:03:51.000Z | nearest_correlation_unittests.py | mikecroucher/nearest_correlation | c04e9b61552d9d2c118c95eb711088ef173ae80f | [
"BSD-3-Clause"
] | 1 | 2018-03-05T14:49:00.000Z | 2018-03-13T11:03:27.000Z | nearest_correlation_unittests.py | mikecroucher/nearest_correlation | c04e9b61552d9d2c118c95eb711088ef173ae80f | [
"BSD-3-Clause"
] | 17 | 2016-01-16T11:22:28.000Z | 2022-03-19T15:46:55.000Z | import unittest
import numpy as np
import nearest_correlation
from nearest_correlation import nearcorr
# References
# [1] 'Computing the nearest correlation matrix - a problem from finance': Higham, IMA Journal of Numerical Analysis (2002) 22, 329.343
# This test is taken from the example given in the
# NAG Mark 24 documentation for g02aa
# It originally appeared in [1]
# This example taken from [1]
# This uses the same input matrix as test_HighamExample2002
# but I made up the weights vector since I couldn't find an example. No idea if it makes sense or not
# Higham's MATLAB original was used as an oracle
# A single calculation that fails after 3 iterations should give the same result as three calculations
# that each perform 1 iteration, restarting where they left off
# Ensure that an exception is raised when a non-symmetric matrix is passed
# Ensure that an exception is raised when calculation does not converge befer maxiterations is exceeded
# Ensure that an exception is not raised when calculation does not converge befer maxiterations is exceeded
# and except_on_too_many_iterations = False
if __name__ == '__main__':
main() | 35.015152 | 135 | 0.546733 | import unittest
import numpy as np
import nearest_correlation
from nearest_correlation import nearcorr
# References
# [1] 'Computing the nearest correlation matrix - a problem from finance': Higham, IMA Journal of Numerical Analysis (2002) 22, 329.343
class ResultsTests(unittest.TestCase):
    """Checks nearcorr's numeric output against published oracle matrices."""

    # This test is taken from the example given in the
    # NAG Mark 24 documentation for g02aa
    # It originally appeared in [1]
    def test_NAGExample(self):
        A = np.array([[2, -1, 0, 0],
                      [-1, 2, -1, 0],
                      [0, -1, 2, -1],
                      [0, 0, -1, 2]])
        X = nearcorr(A)
        expected_result = np.array([[ 1. , -0.8084125 , 0.1915875 , 0.10677505],
                                    [-0.8084125 , 1. , -0.65623269, 0.1915875 ],
                                    [ 0.1915875 , -0.65623269, 1. , -0.8084125 ],
                                    [ 0.10677505, 0.1915875 , -0.8084125 , 1. ]])
        self.assertTrue((np.abs((X - expected_result)) < 1e-8).all())
    # This example taken from [1]
    def test_HighamExample2002(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        X = nearcorr(A)
        expected_result = np.array([[ 1. , 0.76068985, 0.15729811],
                                    [ 0.76068985, 1. , 0.76068985],
                                    [ 0.15729811, 0.76068985, 1. ]])
        self.assertTrue((np.abs((X - expected_result)) < 1e-8).all())
    # This uses the same input matrix as test_HighamExample2002
    # but I made up the weights vector since I couldn't find an example. No idea if it makes sense or not
    # Higham's MATLAB original was used as an oracle
    def test_Weights(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        weights = np.array([1,2,3])
        X = nearcorr(A, weights = weights)
        expected_result = np.array([[ 1. , 0.66774961, 0.16723692],
                                    [ 0.66774961, 1. , 0.84557496],
                                    [ 0.16723692, 0.84557496, 1. ]])
        self.assertTrue((np.abs((X - expected_result)) < 1e-8).all())
    # A single calculation that fails after 3 iterations should give the same result as three calculations
    # that each perform 1 iteration, restarting where they left off
    def test_restart(self):
        A = np.array([[1, 1, 0],
                      [1, 1, 1],
                      [0, 1, 1]])
        # Do 3 iterations on A and gather the result
        try:
            Y = nearcorr(A, max_iterations=3)
        except nearest_correlation.ExceededMaxIterationsError as e:
            result3 = np.copy(e.matrix)
        # Do 1 iteration on A
        try:
            X = nearcorr(A, max_iterations=1)
        except nearest_correlation.ExceededMaxIterationsError as e:
            restart = e
        # restart from previous result and do another iteration
        try:
            X = nearcorr(restart, max_iterations=1)
        except nearest_correlation.ExceededMaxIterationsError as e:
            restart = e
        # restart from previous result and do another iteration
        try:
            X = nearcorr(restart, max_iterations=1)
        except nearest_correlation.ExceededMaxIterationsError as e:
            result1 = e.matrix
        self.assertTrue(np.all(result1 == result3))
class InterfaceTests(unittest.TestCase):
    """Checks nearcorr's argument validation and iteration-limit behaviour."""

    # Ensure that an exception is raised when a non-symmetric matrix is passed
    def test_AssertSymmetric(self):
        A = np.array([[1,1,0],
                      [1,1,1],
                      [1,1,1]])
        self.assertRaises(ValueError,nearcorr,A)
    # Ensure that an exception is raised when calculation does not converge before maxiterations is exceeded
    def test_ExceededMaxIterations(self):
        A = np.array([[1,1,0],
                      [1,1,1],
                      [0,1,1]])
        self.assertRaises(nearest_correlation.ExceededMaxIterationsError,nearcorr,A,max_iterations=10)
    # Ensure that an exception is not raised when calculation does not converge before maxiterations is exceeded
    # and except_on_too_many_iterations = False
    def test_ExceededMaxIterationsFalse(self):
        A = np.array([[1,1,0],
                      [1,1,1],
                      [0,1,1]])
        X = nearcorr(A,max_iterations=10,except_on_too_many_iterations=False)
def main():
    """Entry point: discover and run every test case in this module."""
    unittest.main()
if __name__ == '__main__':
main() | 3,120 | 36 | 251 |
cdc289740524cab80f88b7625228a331c8f26666 | 898 | py | Python | dodo.py | plmbr/plmbr | 201d9d51fe4678701c84325c9fa907c30afb9cad | [
"MIT"
] | null | null | null | dodo.py | plmbr/plmbr | 201d9d51fe4678701c84325c9fa907c30afb9cad | [
"MIT"
] | 4 | 2021-08-05T16:42:11.000Z | 2021-08-05T16:42:12.000Z | dodo.py | plmbr/plmbr | 201d9d51fe4678701c84325c9fa907c30afb9cad | [
"MIT"
] | null | null | null | from pathlib import Path
from plmbr.version import version
pys = list(Path('.').rglob('*.py'))
mds = list(Path('.').rglob('*.md'))
sdist = Path('dist') / f'plmbr-{version}.tar.gz'
| 19.955556 | 75 | 0.493318 | from pathlib import Path
from plmbr.version import version
# All Python sources and Markdown docs in the repo (used as doit file_dep).
pys = list(Path('.').rglob('*.py'))
mds = list(Path('.').rglob('*.md'))
# Source-distribution artifact produced by `python setup.py sdist`.
sdist = Path('dist') / f'plmbr-{version}.tar.gz'
def task_test():
    """doit task: run the pytest suite whenever a Python source changes."""
    return dict(
        actions=['pytest -v'],
        file_dep=pys,
    )
def task_docs():
    """doit task: regenerate the HTML API docs under docs/ with pdoc."""
    commands = [
        'rm -rf docs',
        'pdoc --html -f -o pdoc --config show_source_code=False plmbr',
        'mv pdoc/plmbr docs',
    ]
    return dict(
        actions=commands,
        targets=[],
        file_dep=pys + mds,
        task_dep=['test'],
        verbosity=2,
    )
def task_build():
    """doit task: build the sdist tarball once the tests have passed."""
    return dict(
        actions=['python setup.py sdist'],
        targets=[sdist],
        file_dep=pys,
        task_dep=['test'],
    )
def task_upload():
    """doit task: upload the built sdist to PyPI via twine."""
    return dict(
        actions=[f'twine upload {sdist}'],
        file_dep=[sdist],
        verbosity=2,
    )
| 621 | 0 | 92 |
976f3108674fcd9bb44bb03cd74eb198c40ff685 | 2,211 | py | Python | src/sunhead/workers/abc.py | webclinic017/sunhead | 5117ec797a38eb82d955241d20547d125efe80f3 | [
"Apache-2.0"
] | 1 | 2021-11-17T22:13:58.000Z | 2021-11-17T22:13:58.000Z | src/sunhead/workers/abc.py | webclinic017/sunhead | 5117ec797a38eb82d955241d20547d125efe80f3 | [
"Apache-2.0"
] | 1 | 2016-05-15T12:40:11.000Z | 2016-05-15T12:40:11.000Z | src/sunhead/workers/abc.py | webclinic017/sunhead | 5117ec797a38eb82d955241d20547d125efe80f3 | [
"Apache-2.0"
] | 1 | 2021-11-17T22:13:55.000Z | 2021-11-17T22:13:55.000Z | """
Abstract base classes for construction asynchronous workers.
"""
from abc import ABCMeta, abstractmethod
from typing import Sequence, Tuple
from aiohttp.web import Application
class AbstractWorker(metaclass=ABCMeta):
"""
This base class provides most basic functionality for the worker.
"""
@property
@abstractmethod
@property
@abstractmethod
@abstractmethod
class AbstractStreamWorker(AbstractWorker):
"""
Base class for the worker, who operates on a stream of events.
"""
@property
@abstractmethod
@abstractmethod
@abstractmethod
class AbstractHttpServerWorker(AbstractWorker):
"""
Base for the HTTP server implementation on top of aiohttp.
"""
@property
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
@abstractmethod
@property
@abstractmethod
@abstractmethod
class HttpServerWorkerMixinMeta(ABCMeta):
"""
Ensures that mixin only applies to HttpServerWorker concrete classes.
"""
| 21.466019 | 105 | 0.640886 | """
Abstract base classes for construction asynchronous workers.
"""
from abc import ABCMeta, abstractmethod
from typing import Sequence, Tuple
from aiohttp.web import Application
class AbstractWorker(metaclass=ABCMeta):
"""
This base class provides most basic functionality for the worker.
"""
@property
@abstractmethod
def app_name(self) -> str:
pass
@property
@abstractmethod
def guid(self) -> str:
pass
@abstractmethod
def run(self) -> None:
pass
class AbstractStreamWorker(AbstractWorker):
"""
Base class for the worker, who operates on a stream of events.
"""
@property
@abstractmethod
def stream(self):
pass
@abstractmethod
async def connect_to_stream(self):
pass
@abstractmethod
async def add_subscribers(self):
pass
class AbstractHttpServerWorker(AbstractWorker):
"""
Base for the HTTP server implementation on top of aiohttp.
"""
@property
@abstractmethod
def app(self) -> Application:
pass
@abstractmethod
def create_app(self) -> Application:
pass
@abstractmethod
def get_middlewares(self) -> list:
pass
@abstractmethod
def init_requirements(self, loop) -> None:
pass
@abstractmethod
def add_routers(self) -> None:
pass
@abstractmethod
def get_urlpatterns(self) -> Sequence[Tuple]:
pass
@property
@abstractmethod
def wsgi_app(self) -> Application:
pass
@abstractmethod
def serve(self, srv, handler, loop) -> None:
pass
class HttpServerWorkerMixinMeta(ABCMeta):
"""
Ensures that mixin only applies to HttpServerWorker concrete classes.
"""
def __call__(cls, *args, **kwargs):
# TODO: Figure out how to properly add special methods and checks here for accessing Server class
# if AbstractHttpServerWorker not in cls.__bases__:
# raise TypeError(
# "'HttpServerWorkerMixinMeta' can only be applied "
# "to 'AbstractHttpServerWorker' implementation classes"
# )
return super().__call__(*args, **kwargs)
| 753 | 0 | 391 |
c0cac138dda2d9c3716694edeae27b37936c2d12 | 207 | py | Python | ojar/runes/template.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 7 | 2016-01-03T19:42:07.000Z | 2018-10-23T14:03:12.000Z | ojar/runes/template.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | null | null | null | ojar/runes/template.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 1 | 2018-03-09T08:52:01.000Z | 2018-03-09T08:52:01.000Z | # -*- coding: utf-8 -*-
#
# ojar - [?] rune
# https://github.com/vesche/ojar
#
| 17.25 | 67 | 0.63285 | # -*- coding: utf-8 -*-
#
# ojar - [?] rune
# https://github.com/vesche/ojar
#
def info(_):
    """Return a short description of this rune and its available actions.

    The positional argument is ignored (runes share a common call
    signature); actions are discovered from this module's functions via
    runes.common.list_functions.
    """
    from runes.common import list_functions
    return '[?] rune\nactions: {}'.format(list_functions(__name__))
| 103 | 0 | 23 |
e9f7fe9507a4c8d373b75198f2c2d704817c5f89 | 6,384 | py | Python | DSSCD/criterion/ntxent.py | NeurAI-Lab/D-SSCD | 4edd6401b59e842f23a040535969192fdd943ac0 | [
"MIT"
] | 3 | 2021-11-24T16:15:08.000Z | 2022-02-12T11:14:28.000Z | DSSCD/criterion/ntxent.py | NeurAI-Lab/D-SSCD | 4edd6401b59e842f23a040535969192fdd943ac0 | [
"MIT"
] | null | null | null | DSSCD/criterion/ntxent.py | NeurAI-Lab/D-SSCD | 4edd6401b59e842f23a040535969192fdd943ac0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from util.utils import positive_mask
import os
import math
import util.utils as utils
import torch.nn.functional as F
class NTXent(nn.Module):
"""
The Normalized Temperature-scaled Cross Entropy Loss
Source: https://github.com/Spijkervet/SimCLR
"""
def forward(self, zx, zy, zx1, zy1, global_step):
"""
zx: projection output of batch zx
zy: projection output of batch zy
:return: normalized loss
"""
positive_samples, negative_samples = self.sample_no_dict(zx, zy, zx1, zy1)
if self.margin:
m = self.temperature * math.log(self.alpha / negative_samples.shape[1])
positive_samples = ((positive_samples * self.temperature) - m) / self.temperature
labels = torch.zeros(self.N).to(positive_samples.device).long()
logits = torch.cat((positive_samples, negative_samples), dim=1)
loss = self.criterion(logits, labels)
loss /= self.N
return loss
def sample_no_dict(self, zx, zy, zx1, zy1):
"""
Negative samples without dictionary
"""
# print(zx.shape)
z = torch.cat((zx, zy, zx1,zy1), dim=0)
sim = self.similarity_f(z.unsqueeze(1), z.unsqueeze(0)) / self.temperature
# print(sim.shape,self.batch_size )
# Splitting the matrix into 4 blocks so as to count number of positive and negative samples
sim_left, sim_right = torch.chunk(sim, 2, dim=1)
sim_lu,sim_ll = torch.chunk(sim_left, 2, dim=0)
sim_ru,sim_rl = torch.chunk(sim_right, 2, dim=0)
# print(sim_lu.shape,self.batch_size )
# Extract positive samples from each block
#sim_xy = torch.diag(sim, self.batch_size)
pos_1 = torch.diag(sim_lu, self.batch_size)
pos_2 = torch.diag(sim_lu, -self.batch_size)
pos_3 = torch.diag(sim_rl, self.batch_size)
pos_4 = torch.diag(sim_rl, -self.batch_size)
# sim_yx = torch.diag(sim, -self.batch_size)
positive_samples = torch.cat((pos_1, pos_2, pos_3, pos_4), dim=0).reshape(self.N, 1)
# Extract negative samples
neg_lu = sim_lu[self.mask].reshape(self.batch_size*2, 2*(self.batch_size-1) )
neg_rl = sim_rl[self.mask].reshape(self.batch_size*2, 2*(self.batch_size-1))
# Concatenating the extracted negatives from sim block left upper and right lower.
neg_u = torch.cat((neg_lu, sim_ru), dim=1)
neg_l = torch.cat((sim_ll, neg_rl), dim=1)
negative_samples = torch.cat((neg_u, neg_l), dim=0)
return positive_samples, negative_samples
class BarlowTwinsLoss(torch.nn.Module):
"""
loss function taken from https://github.com/IgorSusmelj/barlowtwins
paper: https://arxiv.org/abs/2103.03230
"""
| 38.690909 | 308 | 0.578321 | import torch
import torch.nn as nn
from util.utils import positive_mask
import os
import math
import util.utils as utils
import torch.nn.functional as F
class NTXent(nn.Module):
    """
    The Normalized Temperature-scaled Cross Entropy Loss, extended to
    four views per sample (two image pairs for change detection).
    Source: https://github.com/Spijkervet/SimCLR
    """
    def __init__(self, args):
        super(NTXent, self).__init__()
        # Hyper-parameters are read from the experiment config object.
        self.batch_size = args.ssl_batchsize
        self.margin = args.margin  # truthy -> subtract additive margin from positives
        self.alpha = args.alpha    # margin scale (see forward)
        self.temperature = args.temperature
        self.device = args.device
        # Boolean mask used to pick negatives out of a similarity block;
        # assumed shaped for a 2B x 2B block -- TODO confirm positive_mask.
        self.mask = positive_mask(args.ssl_batchsize)
        self.criterion = nn.CrossEntropyLoss(reduction="sum")
        self.similarity_f = nn.CosineSimilarity(dim=2)
        # Four views per sample -> 4B rows of logits in total.
        self.N = 4 * self.batch_size
        self.zoom = args.zoom
        self.zoom_factor = args.zoom_factor
        self.writer = args.writer
    def forward(self, zx, zy, zx1, zy1, global_step):
        """
        zx, zy, zx1, zy1: projection outputs of the four views
        global_step: training step (currently unused here)
        :return: normalized loss (cross-entropy over positives vs negatives,
                 averaged over all 4B rows)
        """
        positive_samples, negative_samples = self.sample_no_dict(zx, zy, zx1, zy1)
        if self.margin:
            # Additive margin on the positive logits, scaled so that the
            # subtraction happens in similarity space before re-dividing.
            m = self.temperature * math.log(self.alpha / negative_samples.shape[1])
            positive_samples = ((positive_samples * self.temperature) - m) / self.temperature
        # The positive logit is placed in column 0, so all labels are 0.
        labels = torch.zeros(self.N).to(positive_samples.device).long()
        logits = torch.cat((positive_samples, negative_samples), dim=1)
        loss = self.criterion(logits, labels)
        loss /= self.N
        return loss
    def sample_no_dict(self, zx, zy, zx1, zy1):
        """
        Build (positive, negative) logit tensors without a memory dictionary.
        Stacks the four views into a 4B x 4B cosine-similarity matrix and
        carves it into 2B x 2B quadrants.
        """
        # print(zx.shape)
        z = torch.cat((zx, zy, zx1,zy1), dim=0)
        sim = self.similarity_f(z.unsqueeze(1), z.unsqueeze(0)) / self.temperature
        # print(sim.shape,self.batch_size )
        # Splitting the matrix into 4 blocks so as to count number of positive and negative samples
        sim_left, sim_right = torch.chunk(sim, 2, dim=1)
        sim_lu,sim_ll = torch.chunk(sim_left, 2, dim=0)
        sim_ru,sim_rl = torch.chunk(sim_right, 2, dim=0)
        # print(sim_lu.shape,self.batch_size )
        # Extract positive samples from each block: the +/-B off-diagonals
        # of the upper-left and lower-right quadrants hold view pairs.
        #sim_xy = torch.diag(sim, self.batch_size)
        pos_1 = torch.diag(sim_lu, self.batch_size)
        pos_2 = torch.diag(sim_lu, -self.batch_size)
        pos_3 = torch.diag(sim_rl, self.batch_size)
        pos_4 = torch.diag(sim_rl, -self.batch_size)
        # sim_yx = torch.diag(sim, -self.batch_size)
        positive_samples = torch.cat((pos_1, pos_2, pos_3, pos_4), dim=0).reshape(self.N, 1)
        # Extract negative samples; assumes self.mask selects 2*(B-1)
        # entries per row of a 2B x 2B quadrant -- TODO confirm.
        neg_lu = sim_lu[self.mask].reshape(self.batch_size*2, 2*(self.batch_size-1) )
        neg_rl = sim_rl[self.mask].reshape(self.batch_size*2, 2*(self.batch_size-1))
        # Concatenating the extracted negatives from sim block left upper and right lower.
        neg_u = torch.cat((neg_lu, sim_ru), dim=1)
        neg_l = torch.cat((sim_ll, neg_rl), dim=1)
        negative_samples = torch.cat((neg_u, neg_l), dim=0)
        return positive_samples, negative_samples
class BarlowTwinsLoss(torch.nn.Module):
    """Barlow Twins redundancy-reduction loss.

    Adapted from https://github.com/IgorSusmelj/barlowtwins
    (paper: https://arxiv.org/abs/2103.03230).  The cross-correlation
    matrix of two batch-standardized embeddings is driven towards the
    identity; off-diagonal entries are down-weighted by ``lambda_param``.
    """
    def __init__(self, device, lambda_param=5e-3):
        super(BarlowTwinsLoss, self).__init__()
        self.lambda_param = lambda_param
        self.device = device
    def forward(self, z_a: torch.Tensor, z_b: torch.Tensor):
        # Standardize each feature across the batch dimension.
        za = (z_a - z_a.mean(0)) / z_a.std(0)
        zb = (z_b - z_b.mean(0)) / z_b.std(0)
        # Flatten the two trailing axes so every sample is a vector.
        # (Inputs are assumed 3-D: batch x d1 x d2 -- TODO confirm.)
        za = za.view(za.size(0), za.size(1) * za.size(2))
        zb = zb.view(zb.size(0), zb.size(1) * zb.size(2))
        batch = z_a.size(0)
        dim = za.size(1)
        # Empirical cross-correlation matrix (dim x dim).
        corr = torch.mm(za.T, zb) / batch
        # Squared deviation from the identity target.
        deviation = (corr - torch.eye(dim, device=self.device)).pow(2)
        # Scale only the off-diagonal entries by lambda before summing.
        deviation[~torch.eye(dim, dtype=bool)] *= self.lambda_param
        return deviation.sum()
class BarlowTwinsLoss_CD(torch.nn.Module):
    """Barlow Twins loss variant taking two embedding pairs (change detection).

    Only the first pair (z_a, z_b) contributes to the returned loss; the
    second pair's cross-correlation is computed but its loss term is
    disabled (commented out in the original implementation).
    """
    def __init__(self, device, lambda_param=5e-3):
        super(BarlowTwinsLoss_CD, self).__init__()
        self.lambda_param = lambda_param
        self.device = device
    def forward(self, z_a: torch.Tensor, z_b: torch.Tensor, z_c: torch.Tensor, z_d: torch.Tensor):
        # Batch-standardize every input embedding (inputs assumed 2-D: N x D).
        za = (z_a - z_a.mean(0)) / z_a.std(0)
        zb = (z_b - z_b.mean(0)) / z_b.std(0)
        zc = (z_c - z_c.mean(0)) / z_c.std(0)
        zd = (z_d - z_d.mean(0)) / z_d.std(0)
        batch = z_a.size(0)
        dim = z_a.size(1)
        # Cross-correlation matrices for both pairs (dim x dim).
        corr_ab = torch.mm(za.T, zb) / batch
        corr_cd = torch.mm(zc.T, zd) / batch  # NOTE(review): currently unused
        # Penalize the first pair's deviation from the identity, scaling
        # off-diagonal entries by lambda.
        deviation = (corr_ab - torch.eye(dim, device=self.device)).pow(2)
        deviation[~torch.eye(dim, dtype=bool)] *= self.lambda_param
        return deviation.sum()
| 3,396 | 21 | 156 |
4012346eb823de800b9d598c5263f82065ff1ff7 | 4,748 | py | Python | src/fetch_civil_names.py | hugonmelo/serenata-de-amor | fe9c5602c5ff496f8197777274b3c576faee1472 | [
"MIT"
] | 1 | 2017-04-29T02:39:34.000Z | 2017-04-29T02:39:34.000Z | src/fetch_civil_names.py | hugonmelo/serenata-de-amor | fe9c5602c5ff496f8197777274b3c576faee1472 | [
"MIT"
] | null | null | null | src/fetch_civil_names.py | hugonmelo/serenata-de-amor | fe9c5602c5ff496f8197777274b3c576faee1472 | [
"MIT"
] | 1 | 2019-06-02T20:48:37.000Z | 2019-06-02T20:48:37.000Z | import datetime
import os
import requests
import re
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
if __name__ == '__main__':
civil_names = CivilNames()
civil_names.write_civil_file(civil_names.get_civil_names())
| 33.43662 | 100 | 0.620682 | import datetime
import os
import requests
import re
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
class CivilNames:
    """Scrapes congresspeople's civil (legal) names from the Brazilian
    Chamber of Deputies website and saves them to a dated .xz CSV."""

    # Repository root (two levels above this file) and its data folder.
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    DATA_PATH = os.path.join(BASE_DIR, 'data')
    # Today's date stamps the output filename.
    DATE = datetime.date.today().strftime('%Y-%m-%d')
    FILE_BASE_NAME = '{}-congressperson-civil-names.xz'.format(DATE)
    # Primary (profile page) and fallback (biography page) URL templates.
    PRIMARY_URL = 'http://www.camara.leg.br/Internet/deputado/Dep_Detalhe.asp?id={}'
    SECONDARY_URL = 'http://www2.camara.leg.br/deputados/pesquisa/layouts_deputados_biografia?pk={}'
    # Options shared by every to_csv call.
    CSV_PARAMS = {
        'compression': 'xz',
        'encoding': 'utf-8',
        'index': False
    }
    def __init__(self):
        # Number of distinct congressperson ids; set by
        # get_all_congresspeople_ids and used for progress reporting.
        self.total = 0
def find_newest_file(self, name):
date_regex = re.compile('\d{4}-\d{2}-\d{2}')
matches = (date_regex.findall(f) for f in os.listdir(self.DATA_PATH))
dates = sorted(set([l[0] for l in matches if l]), reverse=True)
for date in dates:
filename = '{}-{}.xz'.format(date, name)
filepath = os.path.join(self.DATA_PATH, filename)
if os.path.isfile(filepath):
return filepath
return None
def read_csv(self, name):
newest_file = self.find_newest_file(name)
if newest_file is None:
msg = 'Could not find the dataset for {}.'.format(newest_file)
raise TypeError(msg)
return pd.read_csv(newest_file, dtype={'congressperson_id': np.str})
    def get_all_congresspeople_ids(self):
        """Yield distinct congressperson ids (as stripped strings) across
        the three chamber datasets, recording the total in self.total
        for progress reporting."""
        print('Fetching all congresspeople ids...')
        datasets = ('current-year', 'last-year', 'previous-years')
        ids = (self.read_csv(name)['congressperson_id'] for name in datasets)
        distinct_ids = pd.concat(ids).unique()
        self.total = len(distinct_ids)
        yield from (str(idx).strip() for idx in distinct_ids)
def write_civil_file(self, congressperson_civil_names):
df = pd.DataFrame(data=congressperson_civil_names)
print('Writing file...')
filepath = os.path.join(self.DATA_PATH, self.FILE_BASE_NAME)
df.to_csv(filepath, **self.CSV_PARAMS)
print('Done.')
@staticmethod
def parse_primary_repository(data, congress_id):
try:
soup = BeautifulSoup(data, 'html.parser')
attrs = {'class': 'visualNoMarker'}
attributes = soup.findAll('ul', attrs=attrs)[0]
line_name = attributes.find('li')
[x.extract() for x in line_name('strong')] # extract tag strong
civil_name = str(line_name.text.strip()).upper()
return dict(congressperson_id=congress_id, civil_name=civil_name)
except IndexError:
print('Could not parse data')
@staticmethod
def parse_secondary_repository(data, congress_id):
try:
soup = BeautifulSoup(data, 'html.parser')
attributes = soup.findAll('div', attrs={'class': 'bioDetalhes'})[0]
line_name = attributes.find('strong')
civil_name = str(line_name.text.strip()).upper()
return dict(congressperson_id=congress_id, civil_name=civil_name)
except IndexError:
print('Could not parse data')
@staticmethod
def fetch_repository(url, congressperson_id, parser):
page = requests.get(url)
if page.status_code != 200:
msg = 'HTTP request to {} failed with status code {}'
print(msg.format(url, page.status_code))
return
data = str(page.content.decode('utf-8'))
return parser(data, congressperson_id)
def fetch_data_repository(self, congress_id):
primary_url = self.PRIMARY_URL.format(congress_id)
data = self.fetch_repository(
primary_url,
congress_id,
self.parse_primary_repository
)
if not data:
secondary_url = self.SECONDARY_URL.format(congress_id)
return self.fetch_repository(
secondary_url,
congress_id,
self.parse_secondary_repository
)
return data
def get_civil_names(self):
congresspeople_ids = self.get_all_congresspeople_ids()
for ind, congress_id in enumerate(congresspeople_ids):
if not np.math.isnan(float(congress_id)):
percentage = (ind / self.total * 100)
msg = 'Processed {} out of {} ({:.2f}%)'
print(msg.format(ind, self.total, percentage), end='\r')
yield dict(self.fetch_data_repository(congress_id))
if __name__ == '__main__':
civil_names = CivilNames()
civil_names.write_civil_file(civil_names.get_civil_names())
| 3,619 | 861 | 23 |
3d5ab70c8fcf79575a7d53aca2da45e76aa7395d | 1,982 | py | Python | gc_prometheus/stats.py | korfuri/python-gc-prometheus | 32f64f11b57354fcaac38e0e26c33cfe895393ab | [
"Apache-2.0"
] | 2 | 2015-06-24T10:10:04.000Z | 2019-07-24T13:18:39.000Z | gc_prometheus/stats.py | korfuri/python-gc-prometheus | 32f64f11b57354fcaac38e0e26c33cfe895393ab | [
"Apache-2.0"
] | null | null | null | gc_prometheus/stats.py | korfuri/python-gc-prometheus | 32f64f11b57354fcaac38e0e26c33cfe895393ab | [
"Apache-2.0"
] | 1 | 2019-09-29T01:15:37.000Z | 2019-09-29T01:15:37.000Z | import gc
import prometheus_client
import sys
enabled = prometheus_client.Gauge(
'python_gc_enabled', 'Whether the garbage collector is enabled.')
enabled.set_function(gc.isenabled)
debug = prometheus_client.Gauge(
'python_gc_debug', 'The debug flags currently set on the Python GC.')
debug.set_function(gc.get_debug)
count = prometheus_client.Gauge(
'python_gc_count',
'Count of objects tracked by the Python garbage collector, by generation.',
['generation'])
set_function_on_map_gauge(count, (0, 1, 2), gc.get_count)
thresholds = prometheus_client.Gauge(
'python_gc_threshold',
'GC thresholds by generation',
['generation'])
set_function_on_map_gauge(thresholds, (0, 1, 2), gc.get_threshold)
if sys.version_info >= (3, 4):
# The following 3 metrics are gauges because they come from a
# callback, but their values behave like counters (the values
# returned by gc.get_stats() are counters).
collections = prometheus_client.Gauge(
'python_gc_collections_total',
'Number of GC collections that occurred by generation',
['generation'])
set_function_on_map_gauge(collections, (0, 1, 2), lambda: [
x['collections'] for x in gc.get_stats()])
collected = prometheus_client.Gauge(
'python_gc_collected_total',
'Number of garbage collected objects by generation',
['generation'])
set_function_on_map_gauge(collected, (0, 1, 2), lambda: [
x['collected'] for x in gc.get_stats()])
uncollectables = prometheus_client.Gauge(
'python_gc_uncollectables',
'Number of uncollectable objects by generation',
['generation'])
set_function_on_map_gauge(uncollectables, (0, 1, 2), lambda: [
x['uncollectable'] for x in gc.get_stats()])
| 33.033333 | 79 | 0.696266 | import gc
import prometheus_client
import sys
def set_function_on_map_gauge(gauge, labelvalues, fn):
    """Register one value-reporting callback per label on *gauge*.

    For every value in *labelvalues*, the child gauge
    ``gauge.labels(value)`` is given a callback that calls *fn* and
    indexes its result with that value, so each labelled series exposes
    one element of the mapping/sequence returned by *fn*.
    """
    def _make_getter(source, key):
        # Factory so each callback captures its own key (avoids the
        # late-binding closure pitfall).
        def _getter():
            return source()[key]
        return _getter

    for label in labelvalues:
        gauge.labels(label).set_function(_make_getter(fn, label))
enabled = prometheus_client.Gauge(
'python_gc_enabled', 'Whether the garbage collector is enabled.')
enabled.set_function(gc.isenabled)
debug = prometheus_client.Gauge(
'python_gc_debug', 'The debug flags currently set on the Python GC.')
debug.set_function(gc.get_debug)
count = prometheus_client.Gauge(
'python_gc_count',
'Count of objects tracked by the Python garbage collector, by generation.',
['generation'])
set_function_on_map_gauge(count, (0, 1, 2), gc.get_count)
thresholds = prometheus_client.Gauge(
'python_gc_threshold',
'GC thresholds by generation',
['generation'])
set_function_on_map_gauge(thresholds, (0, 1, 2), gc.get_threshold)
if sys.version_info >= (3, 4):
# The following 3 metrics are gauges because they come from a
# callback, but their values behave like counters (the values
# returned by gc.get_stats() are counters).
collections = prometheus_client.Gauge(
'python_gc_collections_total',
'Number of GC collections that occurred by generation',
['generation'])
set_function_on_map_gauge(collections, (0, 1, 2), lambda: [
x['collections'] for x in gc.get_stats()])
collected = prometheus_client.Gauge(
'python_gc_collected_total',
'Number of garbage collected objects by generation',
['generation'])
set_function_on_map_gauge(collected, (0, 1, 2), lambda: [
x['collected'] for x in gc.get_stats()])
uncollectables = prometheus_client.Gauge(
'python_gc_uncollectables',
'Number of uncollectable objects by generation',
['generation'])
set_function_on_map_gauge(uncollectables, (0, 1, 2), lambda: [
x['uncollectable'] for x in gc.get_stats()])
| 167 | 0 | 23 |
42f093d37d94f229684b441b1fff048c975a166f | 1,362 | py | Python | search_ddt.py | CesarSMx/Selenium-with-Python-course | 5df3cc2b20c6c6ecfd9b1ab4821d19197df2d9f0 | [
"MIT"
] | null | null | null | search_ddt.py | CesarSMx/Selenium-with-Python-course | 5df3cc2b20c6c6ecfd9b1ab4821d19197df2d9f0 | [
"MIT"
] | null | null | null | search_ddt.py | CesarSMx/Selenium-with-Python-course | 5df3cc2b20c6c6ecfd9b1ab4821d19197df2d9f0 | [
"MIT"
] | null | null | null | import unittest
from ddt import ddt, data, unpack #installation of this library is required 'pip install ddt'
from pyunitreport import HTMLTestRunner
from selenium import webdriver
@ddt
if __name__ == "__main__":
unittest.main(verbosity = 2) | 30.954545 | 93 | 0.689427 | import unittest
from ddt import ddt, data, unpack #installation of this library is required 'pip install ddt'
from pyunitreport import HTMLTestRunner
from selenium import webdriver
@ddt
class searchDDT(unittest.TestCase):
    """Data-driven Selenium test for the demo-store search box.

    Each ``(search term, expected item count)`` pair supplied through the
    ``@data`` decorator is expanded by ``@unpack`` into its own test case.
    A single Brave/Chromium browser session is shared by all cases.
    """

    @classmethod
    def setUpClass(cls):
        """Start one browser session for the whole test class."""
        # NOTE(review): both paths are machine-specific (WSL chromedriver,
        # Windows Brave binary) — adjust locally or move to configuration.
        driver_path = r'/mnt/c/Users/cesar/Documents/desarrollo/Selenium/chromedriver.exe'
        brave_path = r'C:\program Files\braveSoftware\brave-browser\application\brave.exe'
        option = webdriver.ChromeOptions()
        # Point ChromeDriver at the Brave binary (Chromium-based browser).
        option.binary_location = brave_path
        cls.driver = webdriver.Chrome(executable_path=driver_path, chrome_options=option)
        driver = cls.driver
        # Wait up to 10 s for elements to appear before a lookup fails.
        driver.implicitly_wait(10)
        driver.get('http://demo-store.seleniumacademy.com/')

    @data(('dress', 6), ('music', 5))
    @unpack
    def test_search_ddt(self, search_value, expected_count):
        """Submit *search_value* and assert the number of result items."""
        driver = self.driver
        search_field = driver.find_element_by_name('q')
        search_field.clear()
        search_field.send_keys(search_value)
        search_field.submit()
        # Counts only <li class="item last"> nodes — presumably one per
        # result row in this theme; verify against the page markup.
        items = driver.find_elements_by_xpath('//li[@class="item last"]')
        print(f'Items found: {len(items)}')
        self.assertEqual(len(items), expected_count)

    @classmethod
    def tearDownClass(cls):
        """Close the shared browser once all data-driven cases have run."""
        cls.driver.quit()
if __name__ == "__main__":
unittest.main(verbosity = 2) | 911 | 185 | 22 |
bfcd1d5ed629bcbae5910e53ee00843b3f80bd15 | 2,589 | py | Python | quru/server/raft/state/follower.py | ShawnHan1993/quru | 6b103a54d8228e4e2d44b06cc068c60a44b02d67 | [
"MIT"
] | null | null | null | quru/server/raft/state/follower.py | ShawnHan1993/quru | 6b103a54d8228e4e2d44b06cc068c60a44b02d67 | [
"MIT"
] | null | null | null | quru/server/raft/state/follower.py | ShawnHan1993/quru | 6b103a54d8228e4e2d44b06cc068c60a44b02d67 | [
"MIT"
] | null | null | null | import asyncio
import random
import typing
from aiozipkin.span import SpanAbc
from ....quru_logger import logger
from ....env import HEARTBEAT_INTERVAL
from ....words import RaftLog
from ..timer import Timer
from .base import BaseState, log_consistency_check, candidate_qualification
| 32.3625 | 75 | 0.617999 | import asyncio
import random
import typing
from aiozipkin.span import SpanAbc
from ....quru_logger import logger
from ....env import HEARTBEAT_INTERVAL
from ....words import RaftLog
from ..timer import Timer
from .base import BaseState, log_consistency_check, candidate_qualification
class Follower(BaseState):
    """Raft *follower* state.

    Answers leader heartbeats (AppendEntries) and vote requests, and runs
    a randomized election timer that promotes this node to candidate when
    no valid heartbeat arrives in time.
    """

    def start(self) -> asyncio.Task:
        """Enter the follower state: close any open trace span and arm the
        election timer; returns the timer task."""
        if self._trace is not None:
            self._trace.finish()
            self._trace = None
        logger.info("Listening_as_a_follower...", name=self._core.name)
        # Randomize the timeout between 3x and 5x the heartbeat interval so
        # followers do not all become candidates at the same moment.
        lower = HEARTBEAT_INTERVAL * 3
        upper = HEARTBEAT_INTERVAL * 5
        self._election_timer = Timer(
            lambda: random.randint(lower, upper) * 0.001,  # ms -> seconds
            self._to_candidate)
        return self._election_timer.start()

    def stop(self):
        """Leave the follower state: forget leader/vote and stop the timer."""
        self._leader_id = None
        self._voted_for = None
        self._election_timer.stop()

    async def on_request_store(self, data, span: SpanAbc = None):
        """Followers do not serve writes: forward the client store request
        to the current leader and return its reply."""
        return await self._core.call(
            self._leader_id,
            "reqsto",
            data,
            span
        )

    async def _to_candidate(self):
        """Election-timeout callback: promote this node to candidate."""
        logger.info(
            "Not_receiving_hb_from_leader_for_long_time.",
            name=self._core.name,
            leader=self._leader_id)
        self._core.to_candidate()

    @candidate_qualification
    def on_request_vote(self, data, span: SpanAbc):
        """Grant a vote to a qualified candidate.

        Term/log qualification is enforced by the ``candidate_qualification``
        decorator; on success the current term is adopted, the vote is
        recorded and the election timer is reset. Returns ``True`` (granted).
        """
        term = data['term']
        candidate_id = data['candidate_id']
        self._current_term = term
        self._voted_for = candidate_id
        self._election_timer.reset()
        logger.info(
            "Vote_for_{}".format(candidate_id),
            i_am=self._core.name,
            state=self.__class__.__name__,
            span=span)
        return True

    @log_consistency_check
    def on_append_entries(self, data: dict, span: SpanAbc):
        """Apply a leader's AppendEntries RPC.

        Records the leader, clears any pending vote, splices the leader's
        entries into the ledger right after ``prev_log``, applies newly
        committed entries up to the leader's commit index, adopts the
        leader's term and resets the election timer. Log consistency is
        pre-checked by the ``log_consistency_check`` decorator.
        Returns ``(True, [])`` — success with no conflicting entries.
        """
        self._leader_id = data['leader_id']
        if self._voted_for is not None:
            self._voted_for = None
        entries: typing.List[RaftLog] = data['entries']
        leader_commit: int = data['leader_commit']
        self._core.ledger.replace(data['prev_log'].index + 1, entries)
        if self._commit_index < leader_commit:
            # Apply everything the leader has committed that we have not.
            for i in range(self._commit_index, leader_commit + 1):
                log = self._core.ledger[i]
                self._core.apply(log)
                self._commit_index += 1
        self._current_term = data['term']
        self._election_timer.reset()
        logger.debug(
            "Rec_good_appent_from_{}".format(data['leader_id']), span=span)
        return True, []
| 2,056 | 223 | 23 |
d68c5dbd44b92daf56343922dc9846a028f1a0a6 | 233 | py | Python | blender_plugin/Blueprint3DJSBPY/properties.py | Yurii-Baluk/three.js | dac4518e57d6f5e5d5c55b9c51a0150afcc1abf6 | [
"MIT"
] | 1 | 2021-02-26T06:28:42.000Z | 2021-02-26T06:28:42.000Z | blender_plugin/Blueprint3DJSBPY/properties.py | ue007/blueprint-js | a204bdb74286ddaaf55cf9ec57f6898c91ee2185 | [
"MIT"
] | null | null | null | blender_plugin/Blueprint3DJSBPY/properties.py | ue007/blueprint-js | a204bdb74286ddaaf55cf9ec57f6898c91ee2185 | [
"MIT"
] | null | null | null | import bpy; | 33.285714 | 140 | 0.751073 | import bpy;
def register():
    """Blender add-on hook: expose the Blueprint3D project-file path as a
    file-picker property on every Scene (default ``//project.zip``,
    relative to the .blend file)."""
    bpy.types.Scene.bp3djs_project_file = bpy.props.StringProperty(name="Blueprint3D JS Zip", subtype="FILE_PATH", default="//project.zip");
def unregister():
del bpy.types.Scene.bp3djs_project_file; | 176 | 0 | 46 |
5bf105e34387702635196065c7b0237e8b44f213 | 14,792 | py | Python | train_cifar10.py | DOHA-HWANG/vision-transformers-cifar10 | 07e81fd705707063424226621513cff728979c9e | [
"MIT"
] | null | null | null | train_cifar10.py | DOHA-HWANG/vision-transformers-cifar10 | 07e81fd705707063424226621513cff728979c9e | [
"MIT"
] | null | null | null | train_cifar10.py | DOHA-HWANG/vision-transformers-cifar10 | 07e81fd705707063424226621513cff728979c9e | [
"MIT"
] | 1 | 2022-03-11T19:23:41.000Z | 2022-03-11T19:23:41.000Z | # Reference Codes
# https://github.com/kentaroy47/vision-transformers-cifar10
# https://github.com/FrancescoSaverioZuppichini/ViT
# https://github.com/lucidrains/vit-pytorch
#Lib import
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import pandas as pd
import csv
import time
from torchvision.utils import save_image
from timm.models import create_model
from models import *
from models.vit import ViT
from utils import progress_bar
from models.convmixer import ConvMixer
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.data import Mixup
from dataset import my_Cifar10
from distillation_loss import DistillationLoss
# from models.CIFAR10.custom_models_cifar10 import resnet50
import pdb
# parsers
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate') # resnets.. 1e-3, Vit..1e-4?
parser.add_argument('--opt', default="adam")
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--aug', action='store_true', help='use randomaug') # store_true : False
parser.add_argument('--amp', action='store_true', help='enable AMP training')
# parser.add_argument('--mixup', action='store_true', help='add mixup augumentations')
parser.add_argument('--net', type=str, default='vit')
parser.add_argument('--bs', type=int, default='256')
parser.add_argument('--size', type=int, default="32")
parser.add_argument('--classes', type=int, default="10")
parser.add_argument('--hidden_dim', type=int, default="512")
parser.add_argument('--encoder_blocks', type=int, default="6")
parser.add_argument('--mha_head_cnt', type=int, default="8")
parser.add_argument('--n_epochs', type=int, default='50')
parser.add_argument('--patch', default='4', type=int)
parser.add_argument('--convkernel', default='8', type=int)
parser.add_argument('--cos', action='store_false', help='Train with cosine annealing scheduling')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='hard', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# smooding
parser.add_argument('--smoothing', action='store_true', help='use smoothing')
# check quantization, not implemented for ViT
parser.add_argument('--is_quant', type=int, default=0, help='0: no quant or 1: quant')
# parser.add_argument('--dataset', default="cifar10")
args = parser.parse_args()
# Use wandb for visualize & debug
# User guide(Korean): https://greeksharifa.github.io/references/2020/06/10/wandb-usage/
# take in args
import wandb
watermark = "{}_lr{}".format(args.net, args.lr)
if args.amp:
watermark += "_useamp"
wandb.init(project="cifar10-challange",
name=watermark)
wandb.config.update(args)
# Use albumentations for image augmentations
# User guide(Korean): https://hoya012.github.io/blog/albumentation_tutorial/
print('aug: ', args.aug)
if args.aug:
import albumentations
bs = int(args.bs)
imsize = int(args.size)
use_amp = args.amp
if args.net=="vit_timm_large":
size = 384
elif args.net=="vit_timm_small" or args.net=="vit_timm_base":
size = 224
else:
size = imsize
# Load dataset
train_dataset, test_dataset, train_dataloader, test_dataloader = my_Cifar10(imageSize=size, aug=args.aug)
print('train_dataset', len(train_dataset))
print('test_dataset', len(test_dataset))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Check sample image
dataiter = iter(train_dataloader)
images, labels = dataiter.next()
print(images.shape)
img1 = images[0]
print('label', classes[labels[0]])
save_image(img1, "./visualize/cifar10_sample1_{}.png".format(classes[labels[0]]))
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Model
print('==> Building model..')
# net = VGG('VGG19')
if args.net=='res18':
net = ResNet18()
elif args.net=='vgg':
net = VGG('VGG19')
elif args.net=='res34':
net = ResNet34()
elif args.net=='res50':
net = ResNet50()
elif args.net=='res101':
net = ResNet101()
elif args.net=="convmixer":
# from paper, accuracy >96%. you can tune the depth and dim to scale accuracy and speed.
net = ConvMixer(256, 16, kernel_size=args.convkernel, patch_size=1, n_classes=10)
elif args.net=="vit":
# ViT for cifar10
net = ViT(
image_size = args.size,
patch_size = args.patch,
num_classes = args.classes,
dim = args.hidden_dim,
depth = args.encoder_blocks,
heads = args.mha_head_cnt,
mlp_dim = args.hidden_dim,
dropout = 0.1,
emb_dropout = 0.1,
distilled = False,
# teacher_model=None,
)
elif args.net=="deit":
# DeiT for cifar10
# load teacher model
teacher_model = ResNet50()
teacher_checkpoint = torch.load("checkpoint/res50-4-ckpt.t7")
teacher_model.load_state_dict(teacher_checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# import timm
# teacher_model = None
# if args.distillation_type != 'none':
# assert args.teacher_path, 'need to specify teacher-path when using distillation'
# print(f"Creating teacher model: {args.teacher_model}")
# teacher_model = create_model(
# args.teacher_model,
# pretrained=False,
# num_classes=args.classes,
# global_pool='avg',
# )
# if args.teacher_path.startswith('https'):
# checkpoint = torch.hub.load_state_dict_from_url(
# args.teacher_path, map_location='cpu', check_hash=True)
# else:
# checkpoint = torch.load(args.teacher_path, map_location='cpu')
# teacher_model.load_state_dict(checkpoint['model'])
# teacher_model.to(device)
# teacher_model.eval()
net = ViT(
image_size = args.size,
patch_size = args.patch,
num_classes = args.classes,
dim = args.hidden_dim,
depth = args.encoder_blocks,
heads = args.mha_head_cnt,
mlp_dim = args.hidden_dim,
dropout = 0.1,
emb_dropout = 0.1,
distilled = True,
)
elif args.net=="vit_timm_large" or args.net=="vit_timm_base" or args.net=="vit_timm_small":
import timm
print("Available Vision Transformer Models: ")
print(timm.list_models("vit*"))
if args.net=="vit_timm_base":
net = timm.create_model("vit_base_patch16_224", pretrained=True)
elif args.net=="vit_timm_small":
net = timm.create_model("vit_small_patch16_224", pretrained=True)
elif args.net=="vit_timm_large":
net = timm.create_model("vit_large_patch16_384", pretrained=True)
net.head = nn.Linear(net.head.in_features, 10)
# # fix the seed for reproducibility
# seed = args.seed + utils.get_rank()
# torch.manual_seed(seed)
# np.random.seed(seed)
# # random.seed(seed)
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
net = net.to(device)
# if device == 'cuda':
# net = nn.DataParallel(net) # make parallel
# cudnn.benchmark = True
print('resume: ', args.resume)
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/{}-ckpt.t7'.format(args.net))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.classes)
# Loss is CE
# if args.net!="deit":
# criterion = nn.CrossEntropyLoss()
# else:
# if mixup_active:
# # smoothing is handled with mixup label transform
# criterion = SoftTargetCrossEntropy()
# elif args.smoothing:
# criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
# else:
# criterion = nn.CrossEntropyLoss()
criterion = nn.CrossEntropyLoss()
if args.net=="deit":
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
if args.opt == "adam":
optimizer = optim.Adam(net.parameters(), lr=args.lr)
elif args.opt == "sgd":
optimizer = optim.SGD(net.parameters(), lr=args.lr)
# use cosine or reduce LR on Plateau scheduling
if not args.cos:
from torch.optim import lr_scheduler
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True, min_lr=1e-3*1e-5, factor=0.1)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_epochs)
if args.cos:
wandb.config.scheduler = "cosine"
else:
wandb.config.scheduler = "ReduceLROnPlateau"
##### Training
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
##### Validation
list_loss = []
list_acc = []
wandb.watch(net)
for epoch in range(start_epoch, args.n_epochs):
start = time.time()
trainloss = train(epoch)
val_loss, acc = test(epoch)
if args.cos:
scheduler.step(epoch-1)
list_loss.append(val_loss)
list_acc.append(acc)
# Log training..
wandb.log({'epoch': epoch, 'train_loss': trainloss, 'val_loss': val_loss, "val_acc": acc, "lr": optimizer.param_groups[0]["lr"],
"epoch_time": time.time()-start})
# Write out csv..
with open(f'log/log_{args.net}_patch{args.patch}.csv', 'w') as f:
writer = csv.writer(f, lineterminator='\n')
writer.writerow(list_loss)
writer.writerow(list_acc)
print(list_loss)
# writeout wandb
wandb.save("wandb_{}.h5".format(args.net)) | 35.303103 | 139 | 0.666441 | # Reference Codes
# https://github.com/kentaroy47/vision-transformers-cifar10
# https://github.com/FrancescoSaverioZuppichini/ViT
# https://github.com/lucidrains/vit-pytorch
#Lib import
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
import torchvision.transforms as transforms
import os
import argparse
import pandas as pd
import csv
import time
from torchvision.utils import save_image
from timm.models import create_model
from models import *
from models.vit import ViT
from utils import progress_bar
from models.convmixer import ConvMixer
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.data import Mixup
from dataset import my_Cifar10
from distillation_loss import DistillationLoss
# from models.CIFAR10.custom_models_cifar10 import resnet50
import pdb
# parsers
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate') # resnets.. 1e-3, Vit..1e-4?
parser.add_argument('--opt', default="adam")
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
parser.add_argument('--aug', action='store_true', help='use randomaug') # store_true : False
parser.add_argument('--amp', action='store_true', help='enable AMP training')
# parser.add_argument('--mixup', action='store_true', help='add mixup augumentations')
parser.add_argument('--net', type=str, default='vit')
parser.add_argument('--bs', type=int, default='256')
parser.add_argument('--size', type=int, default="32")
parser.add_argument('--classes', type=int, default="10")
parser.add_argument('--hidden_dim', type=int, default="512")
parser.add_argument('--encoder_blocks', type=int, default="6")
parser.add_argument('--mha_head_cnt', type=int, default="8")
parser.add_argument('--n_epochs', type=int, default='50')
parser.add_argument('--patch', default='4', type=int)
parser.add_argument('--convkernel', default='8', type=int)
parser.add_argument('--cos', action='store_false', help='Train with cosine annealing scheduling')
# Distillation parameters
parser.add_argument('--teacher-model', default='regnety_160', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "regnety_160"')
parser.add_argument('--teacher-path', type=str, default='')
parser.add_argument('--distillation-type', default='hard', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# smooding
parser.add_argument('--smoothing', action='store_true', help='use smoothing')
# check quantization, not implemented for ViT
parser.add_argument('--is_quant', type=int, default=0, help='0: no quant or 1: quant')
# parser.add_argument('--dataset', default="cifar10")
args = parser.parse_args()
# Use wandb for visualize & debug
# User guide(Korean): https://greeksharifa.github.io/references/2020/06/10/wandb-usage/
# take in args
import wandb
watermark = "{}_lr{}".format(args.net, args.lr)
if args.amp:
watermark += "_useamp"
wandb.init(project="cifar10-challange",
name=watermark)
wandb.config.update(args)
# Use albumentations for image augmentations
# User guide(Korean): https://hoya012.github.io/blog/albumentation_tutorial/
print('aug: ', args.aug)
if args.aug:
import albumentations
bs = int(args.bs)
imsize = int(args.size)
use_amp = args.amp
if args.net=="vit_timm_large":
size = 384
elif args.net=="vit_timm_small" or args.net=="vit_timm_base":
size = 224
else:
size = imsize
# Load dataset
train_dataset, test_dataset, train_dataloader, test_dataloader = my_Cifar10(imageSize=size, aug=args.aug)
print('train_dataset', len(train_dataset))
print('test_dataset', len(test_dataset))
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Check sample image
dataiter = iter(train_dataloader)
images, labels = dataiter.next()
print(images.shape)
img1 = images[0]
print('label', classes[labels[0]])
save_image(img1, "./visualize/cifar10_sample1_{}.png".format(classes[labels[0]]))
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
# Model
print('==> Building model..')
# net = VGG('VGG19')
if args.net=='res18':
net = ResNet18()
elif args.net=='vgg':
net = VGG('VGG19')
elif args.net=='res34':
net = ResNet34()
elif args.net=='res50':
net = ResNet50()
elif args.net=='res101':
net = ResNet101()
elif args.net=="convmixer":
# from paper, accuracy >96%. you can tune the depth and dim to scale accuracy and speed.
net = ConvMixer(256, 16, kernel_size=args.convkernel, patch_size=1, n_classes=10)
elif args.net=="vit":
# ViT for cifar10
net = ViT(
image_size = args.size,
patch_size = args.patch,
num_classes = args.classes,
dim = args.hidden_dim,
depth = args.encoder_blocks,
heads = args.mha_head_cnt,
mlp_dim = args.hidden_dim,
dropout = 0.1,
emb_dropout = 0.1,
distilled = False,
# teacher_model=None,
)
elif args.net=="deit":
# DeiT for cifar10
# load teacher model
teacher_model = ResNet50()
teacher_checkpoint = torch.load("checkpoint/res50-4-ckpt.t7")
teacher_model.load_state_dict(teacher_checkpoint['model'])
teacher_model.to(device)
teacher_model.eval()
# import timm
# teacher_model = None
# if args.distillation_type != 'none':
# assert args.teacher_path, 'need to specify teacher-path when using distillation'
# print(f"Creating teacher model: {args.teacher_model}")
# teacher_model = create_model(
# args.teacher_model,
# pretrained=False,
# num_classes=args.classes,
# global_pool='avg',
# )
# if args.teacher_path.startswith('https'):
# checkpoint = torch.hub.load_state_dict_from_url(
# args.teacher_path, map_location='cpu', check_hash=True)
# else:
# checkpoint = torch.load(args.teacher_path, map_location='cpu')
# teacher_model.load_state_dict(checkpoint['model'])
# teacher_model.to(device)
# teacher_model.eval()
net = ViT(
image_size = args.size,
patch_size = args.patch,
num_classes = args.classes,
dim = args.hidden_dim,
depth = args.encoder_blocks,
heads = args.mha_head_cnt,
mlp_dim = args.hidden_dim,
dropout = 0.1,
emb_dropout = 0.1,
distilled = True,
)
elif args.net=="vit_timm_large" or args.net=="vit_timm_base" or args.net=="vit_timm_small":
import timm
print("Available Vision Transformer Models: ")
print(timm.list_models("vit*"))
if args.net=="vit_timm_base":
net = timm.create_model("vit_base_patch16_224", pretrained=True)
elif args.net=="vit_timm_small":
net = timm.create_model("vit_small_patch16_224", pretrained=True)
elif args.net=="vit_timm_large":
# Build a ViT-L/16 @ 384px backbone from timm with ImageNet-pretrained weights,
# then swap the classification head for a 10-class linear layer (CIFAR-10-style).
net = timm.create_model("vit_large_patch16_384", pretrained=True)
net.head = nn.Linear(net.head.in_features, 10)
# # fix the seed for reproducibility
# seed = args.seed + utils.get_rank()
# torch.manual_seed(seed)
# np.random.seed(seed)
# # random.seed(seed)
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
net = net.to(device)
# if device == 'cuda':
# net = nn.DataParallel(net) # make parallel
# cudnn.benchmark = True
print('resume: ', args.resume)
if args.resume:
    # Load checkpoint.
    # NOTE(review): this loader expects keys 'net'/'acc'/'epoch' in
    # './checkpoint/{net}-ckpt.t7', but test() below saves keys
    # 'model'/'optimizer'/'scaler' to './checkpoint/{net}-{patch}-ckpt.t7'.
    # Resuming a checkpoint produced by this very script will therefore fail
    # (KeyError / missing file) -- confirm which format is intended.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/{}-ckpt.t7'.format(args.net))
    net.load_state_dict(checkpoint['net'])
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
# Mixup/CutMix augmentation is enabled when any of the related CLI knobs is set.
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
    mixup_fn = Mixup(
        mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
        prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
        label_smoothing=args.smoothing, num_classes=args.classes)
# Loss is CE
# if args.net!="deit":
#     criterion = nn.CrossEntropyLoss()
# else:
#     if mixup_active:
#         # smoothing is handled with mixup label transform
#         criterion = SoftTargetCrossEntropy()
#     elif args.smoothing:
#         criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
#     else:
#         criterion = nn.CrossEntropyLoss()
criterion = nn.CrossEntropyLoss()
if args.net=="deit":
    # wrap the criterion in our custom DistillationLoss, which
    # just dispatches to the original criterion if args.distillation_type is 'none'
    criterion = DistillationLoss(
        criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
    )
# Optimizer selected by CLI flag; both use the same base learning rate.
if args.opt == "adam":
    optimizer = optim.Adam(net.parameters(), lr=args.lr)
elif args.opt == "sgd":
    optimizer = optim.SGD(net.parameters(), lr=args.lr)
# use cosine or reduce LR on Plateau scheduling
if not args.cos:
    from torch.optim import lr_scheduler
    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=3, verbose=True, min_lr=1e-3*1e-5, factor=0.1)
else:
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.n_epochs)
# Record the scheduler choice in the wandb run config for bookkeeping.
if args.cos:
    wandb.config.scheduler = "cosine"
else:
    wandb.config.scheduler = "ReduceLROnPlateau"
##### Training
# Gradient scaler for mixed-precision training; a no-op when use_amp is False.
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
def train(epoch):
    """Run one training epoch over train_dataloader and return the mean loss.

    Uses the closed-over globals: net, device, args, mixup_fn, criterion,
    optimizer, scaler, use_amp, train_dataloader.
    """
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(train_dataloader):
        inputs, targets = inputs.to(device), targets.to(device)
        # pdb.set_trace()
        # Mixup/CutMix is only applied for the deit model (targets become soft).
        if args.net=="deit" and mixup_fn is not None:
            inputs, targets = mixup_fn(inputs, targets)
        # Train with amp
        with torch.cuda.amp.autocast(enabled=use_amp):
            # NOTE(review): the `training=True` kwarg implies a custom model
            # forward signature; timm's stock ViT forward does not accept it --
            # confirm these names map to project-local model classes.
            if args.net=="deit" or args.net=="vit":
                outputs = net(inputs, training = True) # outputs: cls, dist in deit model
            else:
                outputs = net(inputs)
            # DistillationLoss takes (inputs, outputs, targets); plain CE takes
            # (outputs, targets).
            if args.net=="deit":
                loss = criterion(inputs, outputs, targets)
            else:
                loss = criterion(outputs, targets)
        # AMP update: scaled backward, optimizer step, scaler update, then zero
        # the grads (zeroing after step is equivalent to zeroing before the
        # next backward).
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()
        train_loss += loss.item()
        if args.net=="deit":
            # deit path: outputs is (cls, dist); accuracy against the argmax of
            # the (soft) target distribution.
            # NOTE(review): targets.max(1) assumes 2-D soft targets, which only
            # holds when mixup_fn was applied above; with mixup disabled the
            # hard 1-D labels would make this line fail -- confirm.
            _, predicted = outputs[0].max(1)
            total += targets.size(0)
            _, max_target = targets.max(1)
            correct += predicted.eq(max_target).sum().item()
        else:
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        progress_bar(batch_idx, len(train_dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                     % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
    return train_loss/(batch_idx+1)
##### Validation
def test(epoch):
    """Evaluate on test_dataloader; checkpoint on best accuracy.

    Returns (total validation loss, accuracy in percent). Also steps the
    plateau scheduler and appends a line to the per-run text log.
    """
    global best_acc
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    # Local plain CE for evaluation (ignores any distillation wrapping).
    criterion = nn.CrossEntropyLoss()
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(test_dataloader):
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            test_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
            progress_bar(batch_idx, len(test_dataloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    # Update scheduler
    # ReduceLROnPlateau is driven by the (summed) validation loss.
    if not args.cos:
        scheduler.step(test_loss)
    # Save checkpoint.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving..')
        # NOTE(review): keys ('model'/'optimizer'/'scaler') and the file name
        # '{net}-{patch}-ckpt.t7' do not match the resume loader above, which
        # reads keys 'net'/'acc'/'epoch' from '{net}-ckpt.t7' -- confirm.
        state = {"model": net.state_dict(),
              "optimizer": optimizer.state_dict(),
              "scaler": scaler.state_dict()}
        if not os.path.isdir('checkpoint'):
            os.mkdir('checkpoint')
        print(f'save: {args.net}, acc: {acc}')
        torch.save(state, './checkpoint/'+args.net+'-{}-ckpt.t7'.format(args.patch))
        best_acc = acc
    os.makedirs("log", exist_ok=True)
    content = time.ctime() + ' ' + f'Epoch {epoch}, lr: {optimizer.param_groups[0]["lr"]:.7f}, val loss: {test_loss:.5f}, acc: {(acc):.5f}'
    print(content)
    with open(f'log/log_{args.net}_patch{args.patch}.txt', 'a') as appender:
        appender.write(content + "\n")
    return test_loss, acc
# Main epoch loop: train, validate, step cosine LR, log to wandb and CSV.
list_loss = []
list_acc = []
wandb.watch(net)
for epoch in range(start_epoch, args.n_epochs):
    start = time.time()
    trainloss = train(epoch)
    val_loss, acc = test(epoch)
    # NOTE(review): passing an epoch to scheduler.step() is deprecated in
    # recent PyTorch; scheduler.step() with no argument is the modern form.
    if args.cos:
        scheduler.step(epoch-1)
    list_loss.append(val_loss)
    list_acc.append(acc)
    # Log training..
    wandb.log({'epoch': epoch, 'train_loss': trainloss, 'val_loss': val_loss, "val_acc": acc, "lr": optimizer.param_groups[0]["lr"],
        "epoch_time": time.time()-start})
    # Write out csv..
    # Rewritten from scratch ('w') every epoch with the full history so far.
    with open(f'log/log_{args.net}_patch{args.patch}.csv', 'w') as f:
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(list_loss)
        writer.writerow(list_acc)
    print(list_loss)
# writeout wandb
wandb.save("wandb_{}.h5".format(args.net))
3680d19968d83724942a382f5614bdbc76584b9b | 1,753 | py | Python | scripts/aggregate_csv.py | itko/scanbox | 9a00c11eafb4cc2faa69bfcc76bdf0d8e295dcf3 | [
"BSD-3-Clause"
] | 1 | 2020-01-09T09:30:23.000Z | 2020-01-09T09:30:23.000Z | scripts/aggregate_csv.py | itko/scanbox | 9a00c11eafb4cc2faa69bfcc76bdf0d8e295dcf3 | [
"BSD-3-Clause"
] | 23 | 2018-03-19T20:54:52.000Z | 2018-05-16T12:36:59.000Z | scripts/aggregate_csv.py | itko/scanbox | 9a00c11eafb4cc2faa69bfcc76bdf0d8e295dcf3 | [
"BSD-3-Clause"
] | 1 | 2018-03-14T20:00:43.000Z | 2018-03-14T20:00:43.000Z | """
Goes over a directory, looks for all matching filenames (csv's), picks one row and writes them to another file.
"""
import re, sys, os
import polygon2cog as p2c
import validation as v
if __name__ == "__main__":
    # Row index (within each per-frame CSV) that we collect into the output.
    TARGET_LINE = 0
    # priority list of file keys, the first one found is taken
    file_keys = ["cluster_cogs", "controlPoints"]
    # Raw string: matches "<key>_<frameIndex>.csv" and captures the frame index.
    regex_str = r"_(?P<frameIndex>\d+)\.csv"
    if len(sys.argv) >= 3:
        src_dir = os.path.abspath(sys.argv[1])
        out_dir = os.path.abspath(sys.argv[2])
    if len(sys.argv) < 3 or not os.path.isdir(src_dir):
        print('Usage: \n\
Argument 1: source directory with files to aggregate\n\
Argument 2: output directory')
        # BUGFIX: previously execution fell through after printing the usage
        # message and crashed on the undefined src_dir; exit cleanly instead.
        sys.exit(1)
    frame_indices = [[] for i in range(len(file_keys))]
    file_key = "notFound"
    # search for file key: collect the frame indices found for every key
    for i in range(len(file_keys)):
        file_key_t = file_keys[i]
        frame_indices[i] = p2c.findFrameIndices(re.compile(file_key_t + regex_str), 'frameIndex', src_dir)
        file_key = file_keys[i]
    # choose the first non-empty one
    for i in range(len(frame_indices)):
        if len(frame_indices[i]) > 0:
            frame_indices = frame_indices[i]
            file_key = file_keys[i]
            break
    # ROBUSTNESS: if no key matched at all, frame_indices is still the nested
    # list-of-lists; bail out instead of building nonsense file names below.
    if frame_indices and isinstance(frame_indices[0], list):
        print('No matching csv files found in ' + src_dir)
        sys.exit(1)
    frame_indices = sorted(frame_indices)
    agg = []
    # Pull TARGET_LINE from each frame's CSV and prepend the frame index.
    for f in frame_indices:
        path = os.path.join(src_dir, file_key + "_" + str(f) + ".csv")
        lines = v.read_controlpoints(path)
        row = lines[TARGET_LINE].T.tolist()[0]
        row.insert(0, f)
        agg.append(row)
    print(agg)
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    out_file = os.path.join(out_dir, 'aggregated.csv')
    p2c.write_csv(out_file, agg)
| 33.711538 | 111 | 0.62065 | """
Goes over a directory, looks for all matching filenames (csv's), picks one row and writes them to another file.
"""
import re, sys, os
import polygon2cog as p2c
import validation as v
if __name__ == "__main__":
# line index we want to collect
TARGET_LINE = 0
# priority list of file keys, the first one found is taken
file_keys = ["cluster_cogs", "controlPoints"]
regex_str = "_(?P<frameIndex>\d+)\.csv"
if len(sys.argv) >= 3:
src_dir = os.path.abspath(sys.argv[1])
out_dir = os.path.abspath(sys.argv[2])
if len(sys.argv) < 3 or not os.path.isdir(src_dir):
print('Usage: \n\
Argument 1: source directory with files to aggregate\n\
Argument 2: output directory')
frame_indices = [[] for i in range(len(file_keys))]
file_key = "notFound"
# search for file key
for i in range(len(file_keys)):
file_key_t = file_keys[i]
frame_indices[i] = p2c.findFrameIndices(re.compile(file_key_t + regex_str), 'frameIndex', src_dir)
file_key = file_keys[i]
# choose the first non-empty one
for i in range(len(frame_indices)):
if len(frame_indices[i]) > 0:
frame_indices = frame_indices[i]
file_key = file_keys[i]
break
frame_indices = sorted(frame_indices)
agg = []
for f in frame_indices:
path = os.path.join(src_dir, file_key + "_" + str(f) + ".csv")
lines = v.read_controlpoints(path)
row = lines[TARGET_LINE].T.tolist()[0]
row.insert(0, f)
agg.append(row)
print(agg)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
out_file = os.path.join(out_dir, 'aggregated.csv')
p2c.write_csv(out_file, agg)
| 0 | 0 | 0 |
f17130cf728225b39db7ab83a6cc8660f0d8c381 | 1,129 | py | Python | main.py | leobouts/Skyline_top_k_queries | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | [
"MIT"
] | null | null | null | main.py | leobouts/Skyline_top_k_queries | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | [
"MIT"
] | null | null | null | main.py | leobouts/Skyline_top_k_queries | 5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0 | [
"MIT"
] | null | null | null | from a_top_k import *
from b_top_k import *
import sys
import time
# Script entry point.
# NOTE(review): main() is not defined in this excerpt; presumably it is meant
# to come from the star imports above (a_top_k / b_top_k) -- confirm.
if __name__ == "__main__":
    main()
| 23.520833 | 117 | 0.615589 | from a_top_k import *
from b_top_k import *
import sys
import time
def main():
    """Time the a-join (HRJN) and b-join top-k generators for k results each.

    k is read from the first command-line argument; both generators share the
    same line-counter list.
    """
    k = sys.argv[1]
    shared_line_counter = []
    # Build both generators up front, exactly as before, then drain each in turn.
    join_generators = (
        generate_top_join_a(shared_line_counter),
        generate_top_join_b(shared_line_counter),
    )
    for which, generator in enumerate(join_generators):
        begin = time.time()
        for rank in range(1, int(k) + 1):
            item = next(generator)
            print(f"{rank}. pair: {item[1][0][0]},{item[1][1][0]} score:{-item[0]}")
        print('time in seconds:', time.time() - begin)
        # Separator between the two variants' outputs.
        if which == 0:
            print('===================')
main()
| 998 | 0 | 23 |
d8776829df3ab0aeeb99e6e0299de710b503e3ac | 790 | py | Python | fishtank/main.py | craighagan/BenPythonProjects | b086829767c0a61ee003c5396edb38a35468e10e | [
"MIT"
] | null | null | null | fishtank/main.py | craighagan/BenPythonProjects | b086829767c0a61ee003c5396edb38a35468e10e | [
"MIT"
] | null | null | null | fishtank/main.py | craighagan/BenPythonProjects | b086829767c0a61ee003c5396edb38a35468e10e | [
"MIT"
] | 1 | 2021-01-10T20:01:19.000Z | 2021-01-10T20:01:19.000Z | import fishtank
from machine import Pin, I2C
import machine
import time
import ssd1306
from adafruit_mqtt import AdafruitMQTTClient
import passwords
try:
i2c = I2C(-1, scl=Pin(22), sda=Pin(21))
oled_width = 128
oled_height = 64
oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)
sensor = fishtank.FishtankSensor(4)
mqtt_client = AdafruitMQTTClient(passwords.adafruit_io_url,
passwords.adafruit_io_username,
passwords.adafruit_io_key)
webserver = fishtank.FishtankWebserver(sensor, oled, mqtt_client=mqtt_client)
webserver.start()
except Exception as e:
print("something has gone wrong %s\nrebooting in 30 seconds" % str(e))
time.sleep(30)
machine.reset()
| 23.235294 | 81 | 0.677215 | import fishtank
from machine import Pin, I2C
import machine
import time
import ssd1306
from adafruit_mqtt import AdafruitMQTTClient
import passwords
# Fish-tank monitor boot loop: bring up display, sensor and MQTT, then serve;
# on any failure, wait and hard-reset the board.
try:
    # I2C bus on pins 22 (SCL) / 21 (SDA) driving a 128x64 SSD1306 OLED.
    i2c = I2C(-1, scl=Pin(22), sda=Pin(21))
    oled_width = 128
    oled_height = 64
    oled = ssd1306.SSD1306_I2C(oled_width, oled_height, i2c)
    # Sensor on pin 4; MQTT credentials come from the local passwords module.
    sensor = fishtank.FishtankSensor(4)
    mqtt_client = AdafruitMQTTClient(passwords.adafruit_io_url,
                                     passwords.adafruit_io_username,
                                     passwords.adafruit_io_key)
    # Blocking call: serves readings to the display/MQTT until it fails.
    webserver = fishtank.FishtankWebserver(sensor, oled, mqtt_client=mqtt_client)
    webserver.start()
except Exception as e:
    print("something has gone wrong %s\nrebooting in 30 seconds" % str(e))
    time.sleep(30)
    machine.reset()
| 0 | 0 | 0 |
b6f31c43090d9eb68adc29c82edd7611cfcd611d | 9,527 | py | Python | ner_pipeline.py | monologg/KoELECTRA-Pipeline | 65f465419d0fffcac2c8df709dc57bf671dc39cd | [
"Apache-2.0"
] | 38 | 2020-05-13T09:34:46.000Z | 2022-01-11T09:04:28.000Z | ner_pipeline.py | odus05/KoELECTRA-Pipeline | 65f465419d0fffcac2c8df709dc57bf671dc39cd | [
"Apache-2.0"
] | 2 | 2020-05-14T02:14:43.000Z | 2020-09-20T14:30:14.000Z | ner_pipeline.py | odus05/KoELECTRA-Pipeline | 65f465419d0fffcac2c8df709dc57bf671dc39cd | [
"Apache-2.0"
] | 6 | 2020-05-25T07:22:05.000Z | 2022-01-06T05:35:24.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team and Jangwon Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional, Union
import torch
import numpy as np
from transformers import (
BasicTokenizer,
PreTrainedTokenizer,
Pipeline,
ModelCard,
is_tf_available,
is_torch_available
)
from transformers.pipelines import ArgumentHandler
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
class NerPipeline(Pipeline):
    """
    Named Entity Recognition pipeline using ModelForTokenClassification head. See the
    `named entity recognition usage <../usage.html#named-entity-recognition>`__ examples for more information.
    This token recognition pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
    the following task identifier(s):
    - "ner", for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous.
    The models that this pipeline can use are models that have been fine-tuned on a token classification task.
    See the list of available community models fine-tuned on such a task on
    `huggingface.co/models <https://huggingface.co/models?search=&filter=token-classification>`__.
    Arguments:
        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
            Model card attributed to the model for this pipeline.
        framework (:obj:`str`, `optional`, defaults to :obj:`None`):
            The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
            installed.
            If no framework is specified, will default to the one currently installed. If no framework is specified
            and both frameworks are installed, will default to PyTorch.
        args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (:obj:`int`, `optional`, defaults to :obj:`-1`):
            Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
            on the associated CUDA device id.
    """
    # Presumably consumed by the base Pipeline's argument parser to name the
    # positional input -- confirm against transformers.Pipeline.
    default_input_names = "sequences"
| 40.029412 | 120 | 0.628529 | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team and Jangwon Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional, Union
import torch
import numpy as np
from transformers import (
BasicTokenizer,
PreTrainedTokenizer,
Pipeline,
ModelCard,
is_tf_available,
is_torch_available
)
from transformers.pipelines import ArgumentHandler
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.getLogger(__name__)
def custom_encode_plus(sentence,
                       tokenizer,
                       return_tensors=None):
    """Tokenize *sentence* word by word, keeping a first-subtoken mask.

    Returns a 3-tuple:
      - encoded_inputs: dict with "input_ids" and "token_type_ids" (lists, or
        batched tf/pt tensors when return_tensors is "tf"/"pt"),
      - words: the whitespace-split words wrapped in [CLS] ... [SEP],
      - tokens_mask: 1 for the first subtoken of each word (and for the two
        special tokens), 0 for continuation subtokens.
    """
    # {'input_ids': [2, 10841, 10966, 10832, 10541, 21509, 27660, 18, 3], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
    words = sentence.split()
    tokens = []
    tokens_mask = []
    for word in words:
        word_tokens = tokenizer.tokenize(word)
        if not word_tokens:
            word_tokens = [tokenizer.unk_token]  # For handling the bad-encoded word
        tokens.extend(word_tokens)
        # Mark only the first subtoken of every word with 1.
        tokens_mask.extend([1] + [0] * (len(word_tokens) - 1))
    ids = tokenizer.convert_tokens_to_ids(tokens)
    len_ids = len(ids)
    total_len = len_ids + tokenizer.num_special_tokens_to_add()
    # Truncate so that ids + special tokens fit within the tokenizer limit.
    if tokenizer.max_len and total_len > tokenizer.max_len:
        ids, _, _ = tokenizer.truncate_sequences(
            ids,
            pair_ids=None,
            num_tokens_to_remove=total_len - tokenizer.max_len,
            truncation_strategy="longest_first",
            stride=0,
        )
    sequence = tokenizer.build_inputs_with_special_tokens(ids)
    token_type_ids = tokenizer.create_token_type_ids_from_sequences(ids)
    # HARD-CODED: As I know, most of the transformers architecture will be `[CLS] + text + [SEP]``
    # Only way to safely cover all the cases is to integrate `token mask builder` in internal library.
    tokens_mask = [1] + tokens_mask + [1]
    words = [tokenizer.cls_token] + words + [tokenizer.sep_token]
    encoded_inputs = {}
    encoded_inputs["input_ids"] = sequence
    encoded_inputs["token_type_ids"] = token_type_ids
    # Optionally convert the lists to framework tensors with a batch dim of 1.
    if return_tensors == "tf" and is_tf_available():
        encoded_inputs["input_ids"] = tf.constant([encoded_inputs["input_ids"]])
        if "token_type_ids" in encoded_inputs:
            encoded_inputs["token_type_ids"] = tf.constant([encoded_inputs["token_type_ids"]])
        if "attention_mask" in encoded_inputs:
            encoded_inputs["attention_mask"] = tf.constant([encoded_inputs["attention_mask"]])
    elif return_tensors == "pt" and is_torch_available():
        encoded_inputs["input_ids"] = torch.tensor([encoded_inputs["input_ids"]])
        if "token_type_ids" in encoded_inputs:
            encoded_inputs["token_type_ids"] = torch.tensor([encoded_inputs["token_type_ids"]])
        if "attention_mask" in encoded_inputs:
            encoded_inputs["attention_mask"] = torch.tensor([encoded_inputs["attention_mask"]])
    elif return_tensors is not None:
        logger.warning(
            "Unable to convert output to tensors format {}, PyTorch or TensorFlow is not available.".format(
                return_tensors
            )
        )
    return encoded_inputs, words, tokens_mask
class NerPipeline(Pipeline):
    """
    Named Entity Recognition pipeline using ModelForTokenClassification head. See the
    `named entity recognition usage <../usage.html#named-entity-recognition>`__ examples for more information.
    This token recognition pipeline can currently be loaded from the :func:`~transformers.pipeline` method using
    the following task identifier(s):
    - "ner", for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous.
    The models that this pipeline can use are models that have been fine-tuned on a token classification task.
    See the list of available community models fine-tuned on such a task on
    `huggingface.co/models <https://huggingface.co/models?search=&filter=token-classification>`__.
    Arguments:
        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`, defaults to :obj:`None`):
            Model card attributed to the model for this pipeline.
        framework (:obj:`str`, `optional`, defaults to :obj:`None`):
            The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
            installed.
            If no framework is specified, will default to the one currently installed. If no framework is specified
            and both frameworks are installed, will default to PyTorch.
        args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`, defaults to :obj:`None`):
            Reference to the object in charge of parsing supplied pipeline parameters.
        device (:obj:`int`, `optional`, defaults to :obj:`-1`):
            Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, >=0 will run the model
            on the associated CUDA device id.
    """
    default_input_names = "sequences"
    def __init__(
        self,
        model: Union["PreTrainedModel", "TFPreTrainedModel"],
        tokenizer: PreTrainedTokenizer,
        modelcard: Optional[ModelCard] = None,
        framework: Optional[str] = None,
        args_parser: ArgumentHandler = None,
        device: int = -1,
        binary_output: bool = False,
        ignore_labels=None,
        task: str = "",
        ignore_special_tokens: bool = True
    ):
        """See the class docstring; `ignore_labels` lists entity labels to drop
        from the output (defaults to ["O"]), `ignore_special_tokens` strips the
        [CLS]/[SEP] positions before the word-level merge."""
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            modelcard=modelcard,
            framework=framework,
            args_parser=args_parser,
            device=device,
            binary_output=binary_output,
            task=task,
        )
        self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
        # BUGFIX: avoid a mutable default argument; None stands in for ["O"].
        self.ignore_labels = ["O"] if ignore_labels is None else ignore_labels
        self.ignore_special_tokens = ignore_special_tokens
    def __call__(self, *texts, **kwargs):
        """Run word-level NER on each input sentence.

        Returns, per sentence, a list of {"word", "score", "entity"} dicts for
        the first subtoken of each word whose label is not in `ignore_labels`.
        A single input returns its list directly instead of a 1-element list.
        """
        inputs = self._args_parser(*texts, **kwargs)
        answers = []
        for sentence in inputs:
            # Manage correct placement of the tensors
            with self.device_placement():
                # [FIX] Split token by word-level
                tokens, words, tokens_mask = custom_encode_plus(
                    sentence,
                    self.tokenizer,
                    return_tensors=self.framework
                )
                # Forward
                if self.framework == "tf":
                    entities = self.model(tokens)[0][0].numpy()
                    input_ids = tokens["input_ids"].numpy()[0]
                else:
                    with torch.no_grad():
                        tokens = self.ensure_tensor_on_device(**tokens)
                        entities = self.model(**tokens)[0][0].cpu().numpy()
                        input_ids = tokens["input_ids"].cpu().numpy()[0]
            # Softmax over the label axis.
            # BUGFIX: subtract the row max before exponentiating so large
            # logits cannot overflow; the resulting probabilities are identical.
            shifted = entities - entities.max(-1, keepdims=True)
            score = np.exp(shifted) / np.exp(shifted).sum(-1, keepdims=True)
            labels_idx = score.argmax(axis=-1)
            token_level_answer = []
            for idx, label_idx in enumerate(labels_idx):
                # NOTE Append every answer even though the `entity` is in `ignore_labels`
                token_level_answer += [
                    {
                        "word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
                        "score": score[idx][label_idx].item(),
                        "entity": self.model.config.id2label[label_idx],
                    }
                ]
            # [FIX] Now let's change it to word-level NER
            word_idx = 0
            word_level_answer = []
            # NOTE: Might not be safe. BERT, ELECTRA etc. won't make issues.
            if self.ignore_special_tokens:
                words = words[1:-1]
                tokens_mask = tokens_mask[1:-1]
                token_level_answer = token_level_answer[1:-1]
            # Keep only the first subtoken of each word, relabel it with the
            # original word, and filter out ignored entity labels.
            for mask, ans in zip(tokens_mask, token_level_answer):
                if mask == 1:
                    ans["word"] = words[word_idx]
                    word_idx += 1
                    if ans["entity"] not in self.ignore_labels:
                        word_level_answer.append(ans)
            # Append
            answers += [word_level_answer]
        if len(answers) == 1:
            return answers[0]
        return answers
| 5,992 | 0 | 77 |