| column | type | values |
|---|---|---|
| hexsha | string | lengths 40 – 40 |
| size | int64 | 3 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 – 972 |
| max_stars_repo_name | string | lengths 6 – 130 |
| max_stars_repo_head_hexsha | string | lengths 40 – 78 |
| max_stars_repo_licenses | list | lengths 1 – 10 |
| max_stars_count | int64 | 1 – 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 – 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 – 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3 – 972 |
| max_issues_repo_name | string | lengths 6 – 130 |
| max_issues_repo_head_hexsha | string | lengths 40 – 78 |
| max_issues_repo_licenses | list | lengths 1 – 10 |
| max_issues_count | int64 | 1 – 116k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 – 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 – 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3 – 972 |
| max_forks_repo_name | string | lengths 6 – 130 |
| max_forks_repo_head_hexsha | string | lengths 40 – 78 |
| max_forks_repo_licenses | list | lengths 1 – 10 |
| max_forks_count | int64 | 1 – 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 – 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 – 24, nullable (⌀) |
| content | string | lengths 3 – 1.03M |
| avg_line_length | float64 | 1.13 – 941k |
| max_line_length | int64 | 2 – 941k |
| alphanum_fraction | float64 | 0 – 1 |
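
The table above lists the column definitions of the dump. As a quick orientation (not part of the dataset itself), here is a minimal sketch of loading and filtering rows with this layout using pandas; the Parquet file name and the filter thresholds are illustrative assumptions.

# Minimal sketch: inspect rows that follow the column layout above.
# "files.parquet" is a hypothetical export name, not part of the dump.
import pandas as pd

df = pd.read_parquet("files.parquet")

# Keep small Python files with a reasonable alphanumeric fraction.
subset = df[(df["ext"] == "py") & (df["size"] < 100_000) & (df["alphanum_fraction"] > 0.3)]

for _, row in subset.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])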

hexsha: d890dc97219637fe1dd9192b3e2d2068492c1ade | size: 3,127 | ext: py | lang: Python
repo: clayshubert/AutonomousVehicles @ 72709885989b2b4266c86d6e5e0a0609b3f4a959 | path: final_race/potential_field.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

content:
#!/usr/bin/env python
#
import rospy
import numpy as np
from sensor_msgs.msg import LaserScan
from ackermann_msgs.msg import AckermannDriveStamped
from std_msgs.msg import Int32


class PotentialField:
    def __init__(self):
        rospy.Subscriber("/scan", LaserScan, self.scanner_callback)
        rospy.Subscriber("/control", Int32, self.ctrlcallback)
        self.preve = 0
        self.message = -1
        self.deriv = 0
        self.cmd_pub = rospy.Publisher("/ackermann_cmd_mux/input/navigation", AckermannDriveStamped, queue_size=10)
        # self.cmd_pub=rospy.Publisher("/ackermann_cmd", AckermannDriveStamped, queue_size=10)

    def ctrlcallback(self, msg):
        self.message = msg.data

    def scanner_callback(self, msg):
        if self.message in [0, 2, 3, 4]:
            angle_min = msg.angle_min
            angle_inc = msg.angle_increment
            ranges = msg.ranges
            x_sum = 0
            y_sum = 0
            self.ks = 0.004
            for i in range(len(ranges)):
                angle = angle_min + angle_inc * i
                if (angle > -1.5 and angle < -0.05) or (angle > 0.05 and angle < 1.5):
                    distance = ranges[i]
                    if distance > 10:
                        distance = 10
                    x_sum = x_sum + (1/(distance*np.sin(angle)))
                if angle > -1.35 and angle < 1.35:
                    distance = ranges[i]
                    if distance > 10:
                        distance = 10
                    y_sum = y_sum + (1/(distance*np.cos(angle)+0.1))
            kp = -0.0015  # kp probably needs to be very small?
            if self.message == 2:
                # kd for the hairpin
                kd = -0.004
                kp = -0.0025
                self.ks = 0.003
            elif self.message == 4:
                # final stretch
                self.ks = 0.005
                kd = -0.009
            else:
                # regular conditions
                kd = -0.0110
            # tuning for the bridge
            if self.message == 3:
                kd = -0.0040
                kp = -0.00005
                self.ks = 0.0020
            self.deriv = x_sum - self.preve
            steering_angle = (kp * (x_sum)) + (kd*self.deriv)
            self.preve = x_sum
            self.message = -1
            if steering_angle > 0.3:
                steering_angle = 0.3
            if steering_angle < -0.3:
                steering_angle = -0.3
            speed = self.ks*y_sum
            # speed = 1
            # MIN = (len(ranges)/2)-10
            # MAX = (len(ranges)/2)+10
            # if speed > 3:
            #     speed = 3
            # if speed < 0:
            #     speed = .5
            # if ranges[(len(ranges)/2)] < 0.3:
            # or ranges[(len(ranges)/2)+10] or ranges[(len(ranges)/2)-10] < 0.3:
            #     speed = -1.5
            #     steering_angle = -0.3
            drive_msg = AckermannDriveStamped()
            drive_msg.drive.speed = speed
            drive_msg.drive.steering_angle = steering_angle
            self.cmd_pub.publish(drive_msg)


if __name__ == "__main__":
    rospy.init_node("potential_field")
    node = PotentialField()
    rospy.spin()

avg_line_length: 27.191304 | max_line_length: 112 | alphanum_fraction: 0.512632
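
For readability only (this note and sketch are not part of the repository above): scanner_callback accumulates inverse-distance sums over the side sectors (x_sum) and the forward sector (y_sum) of the laser scan, then applies a clipped proportional-derivative law to x_sum for steering and scales speed with y_sum. Isolated as pure functions, the control law in the file above amounts to the following sketch; the default gains are the "regular conditions" values from the code.

# Restatement of the control law in scanner_callback above, not repo code.
def pd_steering(x_sum, prev_x_sum, kp=-0.0015, kd=-0.0110, limit=0.3):
    """Proportional-derivative steering on the lateral inverse-distance sum, clipped to +/- limit."""
    steering = kp * x_sum + kd * (x_sum - prev_x_sum)
    return max(-limit, min(limit, steering))


def forward_speed(y_sum, ks=0.004):
    """Speed command proportional to the forward clearance sum."""
    return ks * y_sum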

hexsha: 6182f03af967cce9bda0bc6bdd0485c12d1b6cea | size: 6,761 | ext: py | lang: Python
repo: kohhi/exist @ c688a228ac9fee56ff29990a6d75b2a09f8457ca | path: intelligence/settings.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

content:
"""
Django settings for intelligence project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
from dotenv import load_dotenv
# Load from .env file
load_dotenv(verbose=True)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('EXIST_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('EXIST_DEBUG_MODE', 'False').lower() in ['true', 'yes', '1']
ALLOWED_HOSTS = os.environ.get('EXIST_ALLOWED_HOSTS').split('|')
# Application definition
INSTALLED_APPS = [
'apps.reputation.apps.ReputationConfig',
'apps.twitter.apps.TwitterConfig',
'apps.twitter_hunter.apps.TwitterHunterConfig',
'apps.exploit.apps.ExploitConfig',
'apps.news.apps.NewsConfig',
'apps.news_hunter.apps.NewsHunterConfig',
'apps.vuln.apps.VulnConfig',
'apps.threat.apps.ThreatConfig',
'apps.threat_hunter.apps.ThreatHunterConfig',
'apps.dashboard.apps.DashboardConfig',
'apps.domain.apps.DomainConfig',
'apps.ip.apps.IpConfig',
'apps.filehash.apps.FilehashConfig',
'apps.url.apps.UrlConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'pure_pagination',
'rest_framework',
'django_filters',
'bootstrap4',
'django_celery_results',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'intelligence.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'libraries':{
'lookup': 'apps.reputation.templatetags.lookup',
}
},
},
]
WSGI_APPLICATION = 'intelligence.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ.get('EXIST_DB_NAME', 'intelligence_db'),
'USER': os.environ.get('EXIST_DB_USER'),
'PASSWORD': os.environ.get('EXIST_DB_PASSWORD'),
'HOST': os.environ.get('EXIST_DB_HOST', 'localhost'),
'PORT': os.environ.get('EXIST_DB_PORT', '3306'),
'OPTIONS': {
'charset': 'utf8mb4',
'init_command': 'SET character_set_connection=utf8mb4;'
'SET collation_connection=utf8mb4_unicode_ci;'
"SET NAMES 'utf8mb4';"
"SET CHARACTER SET utf8mb4;"
},
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = os.environ.get('EXIST_LANGUAGE_CODE', 'ja')
TIME_ZONE = os.environ.get('EXIST_TIME_ZONE', 'Asia/Tokyo')
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static/"),
)
#STATIC_ROOT = os.path.join(BASE_DIR, "static-root/")
# for django-pure-pagination settings
PAGINATION_SETTINGS = {
'PAGE_RANGE_DISPLAYED': 2,
'MARGIN_PAGES_DISPLAYED': 2,
'SHOW_FIRST_PAGE_WHEN_INVALID': True,
}
# format
FORMAT_MODULE_PATH = 'intelligence.formats'
# Rest API
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 50,
}
# intcomma
NUMBER_GROUPING = 3
# Celery
CELERY_RESULT_BACKEND = 'django-db'
# Logging
LOGGING = {
'version': 1,
'formatters': {
'all': {
'format': '\t'.join([
"[%(levelname)s]",
"%(asctime)s",
"module:%(module)s",
"%(pathname)s",
"message:%(message)s",
])
},
'django.server': {
'()': 'django.utils.log.ServerFormatter',
'format': '%(asctime)s %(message)s',
},
},
'handlers': {
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(BASE_DIR, 'logs/django.log'),
'formatter': 'all',
'maxBytes': 1024 * 1024 * 2,
'backupCount': 10,
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'all'
},
'django.server': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'django.server',
},
},
'loggers': {
'command': {
'handlers': ['file', 'console'],
'level': 'DEBUG',
},
'django.server': {
'handlers': ['django.server'],
'level': 'INFO',
'propagate': False,
},
},
}

avg_line_length: 28.170833 | max_line_length: 91 | alphanum_fraction: 0.626682
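
The settings module above pulls every deployment-specific value from environment variables (loaded via python-dotenv), including the pipe-separated EXIST_ALLOWED_HOSTS list. A hedged sketch of bootstrapping it outside a full deployment; the variable names come from the module above, the values are placeholders, and the intelligence project is assumed to be importable from the current directory.

# Illustrative only: placeholder values for the variables read by intelligence/settings.py above.
import os

os.environ.setdefault("EXIST_SECRET_KEY", "change-me")
os.environ.setdefault("EXIST_DEBUG_MODE", "true")
os.environ.setdefault("EXIST_ALLOWED_HOSTS", "localhost|127.0.0.1")
os.environ.setdefault("EXIST_DB_NAME", "intelligence_db")
os.environ.setdefault("EXIST_DB_USER", "exist")
os.environ.setdefault("EXIST_DB_PASSWORD", "secret")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "intelligence.settings")

import django
django.setup()  # imports the settings module above; requires the project root on sys.path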

hexsha: d102d66e86b25d9137ed578a981bec0ca4ae7aad | size: 299 | ext: py | lang: Python
repo: av-dev2/hms_tz @ a36dbe8bfacf6a770913b1bfa000d43edd2cd87a | path: hms_tz/hms_tz/doctype/healthcare_insurance_subscription/healthcare_insurance_subscription.py | licenses: ["MIT"]
max_stars_count: 5 (2021-04-20T06:11:25.000Z to 2021-11-18T15:37:25.000Z) | max_issues_count: 90 (2021-04-05T13:36:34.000Z to 2022-03-31T07:26:25.000Z) | max_forks_count: 10 (2021-03-26T06:43:20.000Z to 2022-02-18T06:36:58.000Z)

content:
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt

from __future__ import unicode_literals

# import frappe
from frappe.model.document import Document


class HealthcareInsuranceSubscription(Document):
    pass

avg_line_length: 27.181818 | max_line_length: 68 | alphanum_fraction: 0.789298

hexsha: 5f0a3db18fd733019616cf9ec46192e19171f0a3 | size: 4,379 | ext: py | lang: Python
repo: Wright4TheJob/CobModelGPR @ 714c8d85d91817bd1abb560359afe4abda116996 | path: linear_regressor.py | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null

content:
"""Read a comma or tab-separated text file and perform linear regression."""
def read_file(filename):
"""Read two column contents of file as floats."""
import csv
delimiter = "\t"
xs = []
ys = []
with open(filename, 'r') as fin:
next(fin) # skip headings
if delimiter == ',':
reader = csv.reader(fin)
else:
reader = csv.reader(fin, delimiter=delimiter)
for line in reader:
xs.append(read_float(0, line))
ys.append(read_float(1, line))
return (xs, ys)
def read_float(index, to_read):
if index is None:
return None
else:
try:
return float(to_read[index])
except ValueError:
print('Float conversion error')
def dot(K, L):
if len(K) != len(L):
return 0
return sum(i[0] * i[1] for i in zip(K, L))
def linear_regression(x_list, y_list):
"""Perform regression analysis on array of x values an list of y values"""
import numpy as np
from numpy import dot
from numpy.linalg import inv
from scipy import stats
import math
n = len(y_list)
x_array = np.atleast_2d(np.array([np.ones(n), x_list])).transpose()
# ######### Calculate B values ############
# Solve for B matrix through pseudo-inverse
yArray = np.array(y_list).transpose()
XTX = dot(x_array.transpose(), x_array)
# print("X transpose * X = " + repr(XTX))
XTy = dot(x_array.transpose(), yArray)
# print("X transpose * y" + repr(XTy))
# print(XTX)
invXTX = inv(XTX)
# print("inv(X transpose * X) = " + repr(invXTX))
B = dot(invXTX, XTy)
# print("Beta Array (Estimated Parameters) = " + repr(B))
dof = len(yArray) - len(B)
# ############ Calculate checking statistics #############
yhat = dot(x_array, B)
# print("Predicted value of Y = " + repr(yhat))
residuals = yArray-yhat
# print("Residuals (y-yhat) = " + repr(residuals))
SSE = dot(residuals.transpose(), residuals)
SSE = SSE
# print("Sum of Squares of the Residuals, SSE = " + repr(SSE))
residualVariance = SSE/dof
parameterCOV = invXTX*residualVariance
# print("Parameter Covariance = " + repr(parameterCOV))
SSR = (dot(dot(B.transpose(), x_array.transpose()), yArray)
- sum(yArray)**2/n)
SSR = SSR
# print("Sum of Squares of the Regression, SSR = " + repr(SSR))
SST = dot(yArray.transpose(), yArray) - sum(yArray)**2/n
SST = SST
# print("Total sum of squares = " + repr(SST))
dofSST = n - 1
dofSSR = len(B)
dofSSE = dofSST - dofSSR
p = len(B)
SigmaSquared = SSE/(n-p)
# print('sigma^2$ = %3.4f' % (SigmaSquared))
# ########### Hypothesis Test Beta terms ###############
alpha = 1-0.95
rsquared = 1 - SSE/SST
print("R Squared = %1.5f" % (rsquared))
Sxx = []
for i in range(0, n):
Sxx.append(sum((x_array[i, :] - np.mean(x_array[i, :]))**2))
# print("MSE = " + repr(MSE))
# print("Sxx = " + repr(Sxx))
t0 = [100.1]
for i in range(1, len(B)):
t0.append(B[i]/math.sqrt(SigmaSquared*invXTX[i, i]))
# print("t0 values for Beta terms = " + repr(t0))
# reject null hypothesis if |tValue| > t(alpha/2,n-1)
# Equations from page 310, Chapter 13.3 in (old) book
tStatistic = stats.distributions.t.ppf(alpha/2, n - 1)
Bsig = []
for i in range(1, len(B)):
if abs(t0[i]) > abs(tStatistic):
Bsig.append(1)
# print("B" + repr(i) + " is significant")
else:
Bsig.append(0)
# print("B" + repr(i) + " is not significant")
# ############# Confidence intervals for Beta values ##################
print("Confidence Interval for Beta Values")
for i, B1, C in zip(range(len(B)), B, np.diagonal(parameterCOV)):
lowB = B1 - stats.t.ppf(1-alpha/2, dofSSE) * math.sqrt(C)
highB = B1 + stats.t.ppf(1-alpha/2, dofSSE) * math.sqrt(C)
print("B%i = %g" % (i, B1))
print("%g < B%i < %g with 95%% confidence" % (lowB, i, highB))
def main():
import sys
try:
filename = sys.argv[1]
except IndexError:
print('Please provide a file name.')
return
(x_list, y_list) = read_file(filename)
linear_regression(x_list, y_list)
if __name__ == "__main__":
main()

avg_line_length: 27.89172 | max_line_length: 78 | alphanum_fraction: 0.558575
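
The script above solves the normal equations explicitly and prints R squared plus confidence intervals for the coefficients. As an independent sanity check (not part of the original script), the fitted slope and intercept can be compared against scipy.stats.linregress on synthetic data; the data below is made up for illustration.

# Cross-check sketch: compare the closed-form OLS above against scipy.stats.linregress.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
y = 2.0 * x + 1.0 + rng.normal(scale=0.5, size=x.size)

result = stats.linregress(x, y)
print("slope=%.4f intercept=%.4f r^2=%.4f" % (result.slope, result.intercept, result.rvalue**2))

# Running linear_regression(x.tolist(), y.tolist()) from the script above on the same
# data should report B0 close to the intercept and B1 close to the slope.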

hexsha: 5b166581b0c528fe56f943ab5064887471a0a4c9 | size: 4,691 | ext: py | lang: Python
repo: stevenkfirth/eprun @ 2a580f8ac0b5976cb1bc84328ffb821bd31731e6 | path: tests/_old/test_epeso.py | licenses: ["MIT"]
max_stars_count: 5 (2021-05-22T19:13:13.000Z to 2022-03-07T04:54:08.000Z) | max_issues_count: null | max_forks_count: null

content:
# -*- coding: utf-8 -*-

import unittest

import eprun
from eprun import EPEso

from pprint import pprint
import pandas as pd


class Test_EPEso(unittest.TestCase):

    def test___init__(self):
        ""
        self.assertIsInstance(e,
                              EPEso)

    def test_get_environment(self):
        ""
        se = e.get_environment('RUN PERIOD 1')
        self.assertEqual(str(se),
                         'EPEsoSimuationEnvironment(environment_title="RUN PERIOD 1")')

    def test_get_environments(self):
        ""
        envs = e.get_environments()
        # print(str(envs))
        self.assertIsInstance(envs,
                              list)
        self.assertEqual(len(envs),
                         3)
        self.assertIsInstance(envs[0],
                              eprun.epeso_simulation_environment.EPEsoSimulationEnvironment)

    def test_programme_version_statement(self):
        ""
        self.assertEqual(e.programme_version_statement,
                         {'programme': 'EnergyPlus',
                          'version': 'Version 9.4.0-998c4b761e',
                          'timestamp': 'YMD=2020.11.13 06:25'})

    def test_standard_items_dictionary(self):
        ""
        self.assertEqual(e.standard_items_dictionary,
                         {1: {'comment': None,
                              'items': [{'name': 'Environment Title', 'unit': None},
                                        {'name': 'Latitude', 'unit': 'deg'},
                                        {'name': 'Longitude', 'unit': 'deg'},
                                        {'name': 'Time Zone', 'unit': None},
                                        {'name': 'Elevation', 'unit': 'm'}],
                              'number_of_values': 5},
                          2: {'comment': None,
                              'items': [{'name': 'Day of Simulation', 'unit': None},
                                        {'name': 'Month', 'unit': None},
                                        {'name': 'Day of Month', 'unit': None},
                                        {'name': 'DST Indicator', 'unit': '1=yes 0=no'},
                                        {'name': 'Hour', 'unit': None},
                                        {'name': 'StartMinute', 'unit': None},
                                        {'name': 'EndMinute', 'unit': None},
                                        {'name': 'DayType', 'unit': None}],
                              'number_of_values': 8},
                          3: {'comment': 'When Daily Report Variables Requested',
                              'items': [{'name': 'Cumulative Day of Simulation', 'unit': None},
                                        {'name': 'Month', 'unit': None},
                                        {'name': 'Day of Month', 'unit': None},
                                        {'name': 'DST Indicator', 'unit': '1=yes 0=no'},
                                        {'name': 'DayType', 'unit': None}],
                              'number_of_values': 5},
                          4: {'comment': 'When Monthly Report Variables Requested',
                              'items': [{'name': 'Cumulative Days of Simulation', 'unit': None},
                                        {'name': 'Month', 'unit': None}],
                              'number_of_values': 2},
                          5: {'comment': 'When Run Period Report Variables Requested',
                              'items': [{'name': 'Cumulative Days of Simulation', 'unit': None}],
                              'number_of_values': 1},
                          6: {'comment': 'When Annual Report Variables Requested',
                              'items': [{'name': 'Calendar Year of Simulation', 'unit': None}],
                              'number_of_values': 1}}
                         )

    def test_variable_dictionary(self):
        ""
        self.assertEqual(list(e.variable_dictionary.keys()),
                         [7, 8, 9, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
                          60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77])
        self.assertEqual(e.variable_dictionary[7],
                         {'comment': 'Hourly',
                          'number_of_values': 1,
                          'object_name': 'Environment',
                          'quantity': 'Site Outdoor Air Drybulb Temperature',
                          'unit': 'C'}
                         )


if __name__ == '__main__':
    e = EPEso(fp=r'files\eplusout.eso')
    unittest.main(Test_EPEso())

avg_line_length: 45.543689 | max_line_length: 98 | alphanum_fraction: 0.408229

hexsha: 72860a76d7b21d424f4bb528d6290cfee36c32aa | size: 70,567 | ext: py | lang: Python
repo: number09/cerberus @ 88c6cc0ca34507a263bdb2af46cf1459d17ed059 | path: cerberus/base.py | licenses: ["ISC"]
max_stars_count: 1 (2020-02-20T18:15:25.000Z to 2020-02-20T18:15:25.000Z) | max_issues_count: null | max_forks_count: null

content:
import re
import typing
from abc import abstractclassmethod
from ast import literal_eval
from collections import abc, ChainMap
from copy import copy
from datetime import date, datetime
from typing import (
Any,
Callable,
ClassVar,
Container,
Dict,
Generic,
Iterable,
List,
Mapping,
NamedTuple,
Optional,
Sequence,
Set,
Sized,
Tuple,
Type,
Union,
)
from warnings import warn
from cerberus import errors
from cerberus.platform import get_type_args, get_type_origin, ForwardRef, _GenericAlias
from cerberus.typing import (
AllowUnknown,
Document,
DocumentPath,
ErrorHandlerConfig,
FieldName,
NoneType,
RegistryItem,
RegistryItems,
RulesSet,
Schema,
TypesMapping,
)
from cerberus.utils import drop_item_from_tuple, readonly_classproperty
RULE_SCHEMA_SEPARATOR = "The rule's arguments are validated against this schema:"
toy_error_handler = errors.ToyErrorHandler()
_ellipsis = typing.Tuple[int, ...].__args__[-1]
def dummy_for_rule_validation(rule_constraints: str) -> Callable:
def dummy(self, constraint, field, value):
raise RuntimeError(
'Dummy method called. Its purpose is to hold just'
'validation constraints for a rule in its '
'docstring.'
)
f = dummy
f.__doc__ = rule_constraints
return f
# Exceptions
class DocumentError(Exception):
""" Raised when the target document is missing or has the wrong format """
class SchemaError(Exception):
""" Raised when the validation schema is missing, has the wrong format or
contains errors. """
# Schema mangling
def normalize_rulesset(rulesset: RulesSet) -> RulesSet:
""" Transforms a set of rules into a canonical form. """
return normalize_schema({0: rulesset})[0]
def normalize_schema(schema: Schema) -> Schema:
""" Transforms a schema into a canonical form. """
# TODO add a caching mechanism
for rules in schema.values():
if isinstance(rules, str):
continue
if "type" in rules:
constraint = rules["type"]
if not (
isinstance(constraint, Iterable) and not isinstance(constraint, str)
):
rules["type"] = (constraint,)
_expand_generic_type_aliases(rules)
# TODO prepare constraints of other rules to improve validation speed
_expand_schema(schema)
return schema
def _expand_schema(schema: Schema) -> None:
try:
_expand_logical_shortcuts(schema)
_expand_subschemas(schema)
except Exception: # failure is delayed
pass
def _expand_generic_type_aliases(rules: RulesSet) -> None:
compound_types = []
plain_types = []
is_nullable = False
for constraint in _flatten_Union_and_Optional(rules.pop("type")):
if isinstance(constraint, _GenericAlias):
origin = get_type_origin(constraint)
args = get_type_args(constraint)
if issubclass(origin, abc.Mapping) and not constraint.__parameters__:
compound_types.append(
{
"type": origin,
"keysrules": {"type": args[0]},
"valuesrules": {"type": args[1]},
}
)
elif (
issubclass(origin, (abc.MutableSequence, abc.Set))
and not constraint.__parameters__
):
compound_types.append({"type": origin, "itemsrules": {"type": args[0]}})
elif issubclass(origin, tuple) and args:
if args[-1] is _ellipsis:
compound_types.append(
{"type": origin, "itemsrules": {"type": args[0]}}
)
else:
compound_types.append(
{"type": origin, "items": tuple({"type": x} for x in args)}
)
else:
plain_types.append(origin)
# from typing.Optional
elif constraint is NoneType: # type: ignore
is_nullable = True
elif isinstance(constraint, ForwardRef):
plain_types.append(constraint.__forward_arg__)
else:
plain_types.append(constraint)
if compound_types or is_nullable:
if "anyof" in rules:
raise SchemaError(
"The usage of the `anyof` rule is not possible in a ruleset where the"
"`type` rule specifies compound types as constraints."
)
if plain_types:
compound_types.append({"type": tuple(plain_types)})
if is_nullable:
compound_types.append({"nullable": True})
rules["anyof"] = tuple(compound_types)
else:
rules["type"] = tuple(plain_types)
def _flatten_Union_and_Optional(type_constraints):
for constraint in type_constraints:
if get_type_origin(constraint) is typing.Union:
yield from _flatten_Union_and_Optional(get_type_args(constraint))
else:
yield constraint
def _expand_logical_shortcuts(schema):
""" Expand agglutinated rules in a definition-schema.
:param schema: The schema-definition to expand.
:return: The expanded schema-definition.
"""
for rules in schema.values():
if isinstance(rules, str):
continue
for of_rule in (
x for x in rules if x.startswith(('allof_', 'anyof_', 'noneof_', 'oneof_'))
):
operator, rule = of_rule.split('_', 1)
rules[operator] = tuple(
normalize_rulesset({rule: x}) for x in rules[of_rule]
)
rules.pop(of_rule)
def _expand_subschemas(schema):
for rules in schema.values():
if isinstance(rules, str):
continue
if 'schema' in rules:
rules['schema'] = normalize_schema(rules['schema'])
for rule in (
x for x in ('itemsrules', 'keysrules', 'valuesrules') if x in rules
):
rules[rule] = normalize_rulesset(rules[rule])
if isinstance(rules.get("allow_unknown", None), Mapping):
rules["allow_unknown"] = normalize_rulesset(rules["allow_unknown"])
for rule in (
x for x in ('allof', 'anyof', 'items', 'noneof', 'oneof') if x in rules
):
if not isinstance(rules[rule], Sequence):
continue
new_rules_definition = []
for item in rules[rule]:
new_rules_definition.append(normalize_rulesset(item))
rules[rule] = tuple(new_rules_definition)
# Registries
class Registry(Generic[RegistryItem]):
""" A registry to store and retrieve schemas and parts of it by a name
that can be used in validation schemas.
:param definitions: Optional, initial definitions.
"""
def __init__(
self, definitions: Union[RegistryItems, Iterable[Tuple[str, RegistryItem]]] = ()
):
self._storage = {} # type: Dict[str, RegistryItem]
self.extend(definitions)
@abstractclassmethod
def _expand_definition(cls, definition: RegistryItem) -> RegistryItem:
pass
def add(self, name: str, definition: RegistryItem) -> None:
""" Register a definition to the registry. Existing definitions are
replaced silently.
:param name: The name which can be used as reference in a validation
schema.
:param definition: The definition.
"""
self._storage[name] = self._expand_definition(definition)
def all(self) -> RegistryItems:
""" Returns a :class:`dict` with all registered definitions mapped to
their name. """
return self._storage
def clear(self):
""" Purge all definitions in the registry. """
self._storage.clear()
def extend(
self, definitions: Union[RegistryItems, Iterable[Tuple[str, RegistryItem]]]
) -> None:
""" Add several definitions at once. Existing definitions are
replaced silently.
:param definitions: The names and definitions.
"""
for name, definition in dict(definitions).items():
self.add(name, definition)
def get(
self, name: str, default: Optional[RegistryItem] = None
) -> Optional[RegistryItem]:
""" Retrieve a definition from the registry.
:param name: The reference that points to the definition.
:param default: Return value if the reference isn't registered. """
return self._storage.get(name, default)
def remove(self, *names: str) -> None:
""" Unregister definitions from the registry.
:param names: The names of the definitions that are to be
unregistered. """
for name in names:
self._storage.pop(name, None)
class SchemaRegistry(Registry):
@classmethod
def _expand_definition(cls, definition):
return normalize_schema(definition)
class RulesSetRegistry(Registry):
@classmethod
def _expand_definition(cls, definition):
return normalize_rulesset(definition)
schema_registry, rules_set_registry = SchemaRegistry(), RulesSetRegistry()
# Defining types
TypeDefinition = NamedTuple(
'TypeDefinition',
(
('name', str),
('included_types', Tuple[Type[Any], ...]),
('excluded_types', Tuple[Type[Any], ...]),
),
)
"""
This class is used to define types that can be used as value in the
:attr:`~cerberus.Validator.types_mapping` property.
The ``name`` should be descriptive and match the key it is going to be assigned
to.
A value that is validated against such definition must be an instance of any of
the types contained in ``included_types`` and must not match any of the types
contained in ``excluded_types``.
"""
# The Validator
class ValidatorMeta(type):
""" Metaclass for all validators """
def __new__(mcls, name, bases, namespace):
if '__doc__' not in namespace:
namespace['__doc__'] = bases[0].__doc__
return super().__new__(mcls, name, bases, namespace)
def __init__(cls, name, bases, namespace):
def attributes_with_prefix(prefix):
return tuple(
x[len(prefix) + 2 :]
for x in dir(cls)
if x.startswith('_' + prefix + '_')
)
super().__init__(name, bases, namespace)
cls.validation_rules = {
attribute: cls.__get_rule_schema('_validate_' + attribute)
for attribute in attributes_with_prefix('validate')
}
cls.checkers = tuple(x for x in attributes_with_prefix('check_with'))
x = cls.validation_rules['check_with']['oneof']
x[1]['itemsrules']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.checkers
for rule in (x for x in cls.mandatory_validations if x != 'nullable'):
cls.validation_rules[rule]['required'] = True
cls.coercers, cls.default_setters, cls.normalization_rules = (), (), {}
for attribute in attributes_with_prefix('normalize'):
if attribute.startswith('coerce_'):
cls.coercers += (attribute[len('coerce_') :],)
elif attribute.startswith('default_setter_'):
cls.default_setters += (attribute[len('default_setter_') :],)
else:
cls.normalization_rules[attribute] = cls.__get_rule_schema(
'_normalize_' + attribute
)
for rule in ('coerce', 'rename_handler'):
x = cls.normalization_rules[rule]['oneof']
x[1]['itemsrules']['oneof'][1]['allowed'] = x[2]['allowed'] = cls.coercers
cls.normalization_rules['default_setter']['oneof'][1][
'allowed'
] = cls.default_setters
cls.rules = ChainMap(cls.normalization_rules, cls.validation_rules)
def __get_rule_schema(mcls, method_name):
docstring = getattr(mcls, method_name).__doc__
if docstring is None:
result = {}
else:
if RULE_SCHEMA_SEPARATOR in docstring:
docstring = docstring.split(RULE_SCHEMA_SEPARATOR)[1]
try:
result = literal_eval(docstring.strip())
except Exception:
result = {}
if not result and method_name != '_validate_meta':
warn(
"No validation schema is defined for the arguments of rule "
"'%s'" % method_name.split('_', 2)[-1]
)
return result
class UnconcernedValidator(metaclass=ValidatorMeta):
""" Validator class. Normalizes and/or validates any mapping against a
validation-schema which is provided as an argument at class instantiation
or upon calling the :meth:`~cerberus.Validator.validate`,
:meth:`~cerberus.Validator.validated` or
:meth:`~cerberus.Validator.normalized` method. An instance itself is
callable and executes a validation.
All instantiation parameters are optional.
There are the introspective properties :attr:`types`, :attr:`validators`,
:attr:`coercers`, :attr:`default_setters`, :attr:`rules`,
:attr:`normalization_rules` and :attr:`validation_rules`.
The attributes reflecting the available rules are assembled considering
constraints that are defined in the docstrings of rules' methods and is
effectively used as validation schema for :attr:`schema`.
:param schema: See :attr:`~cerberus.Validator.schema`.
Defaults to :obj:`None`.
:param ignore_none_values: See :attr:`~cerberus.Validator.ignore_none_values`.
Defaults to ``False``.
:param allow_unknown: See :attr:`~cerberus.Validator.allow_unknown`.
Defaults to ``False``.
:param require_all: See :attr:`~cerberus.Validator.require_all`.
Defaults to ``False``.
:param purge_unknown: See :attr:`~cerberus.Validator.purge_unknown`.
Defaults to to ``False``.
:param purge_readonly: Removes all fields that are defined as ``readonly`` in the
normalization phase.
:param error_handler: The error handler that formats the result of
:attr:`~cerberus.Validator.errors`.
When given as two-value tuple with an error-handler
class and a dictionary, the latter is passed to the
initialization of the error handler.
Default: :class:`~cerberus.errors.BasicErrorHandler`.
"""
mandatory_validations = ('nullable',) # type: ClassVar[Tuple[str, ...]]
""" Rules that are evaluated on any field, regardless whether defined in
the schema or not."""
priority_validations = (
'nullable',
'readonly',
'type',
'empty',
) # type: ClassVar[Tuple[str, ...]]
""" Rules that will be processed in that order before any other. """
types_mapping = {
'boolean': TypeDefinition('boolean', (bool,), ()),
'bytearray': TypeDefinition('bytearray', (bytearray,), ()),
'bytes': TypeDefinition('bytes', (bytes,), ()),
'complex': TypeDefinition('complex', (complex,), ()),
'date': TypeDefinition('date', (date,), (datetime,)),
'datetime': TypeDefinition('datetime', (datetime,), ()),
'dict': TypeDefinition('dict', (Mapping,), ()),
'float': TypeDefinition('float', (float,), ()),
'frozenset': TypeDefinition('frozenset', (frozenset,), ()),
'integer': TypeDefinition('integer', (int,), (bool,)),
'list': TypeDefinition('list', (list,), ()),
'number': TypeDefinition('number', (int, float), (bool,)),
'set': TypeDefinition('set', (set,), ()),
'string': TypeDefinition('string', (str,), ()),
'tuple': TypeDefinition('tuple', (tuple,), ()),
'type': TypeDefinition('type', (type,), ()),
} # type: ClassVar[TypesMapping]
""" This mapping holds all available constraints for the type rule and
their assigned :class:`~cerberus.TypeDefinition`. """
types_mapping.update(
(x, TypeDefinition(x, (getattr(abc, x),), ()))
for x in abc.__all__ # type: ignore
)
_valid_schemas = set() # type: ClassVar[Set[Tuple[int, int]]]
""" A :class:`set` of hashes derived from validation schemas that are
legit for a particular ``Validator`` class. """
# these will be set by the metaclass, here type hints are given:
checkers = () # type: ClassVar[Tuple[str, ...]]
coercers = () # type: ClassVar[Tuple[str, ...]]
default_setters = () # type: ClassVar[Tuple[str, ...]]
normalization_rules = {} # type: ClassVar[Schema]
rules = {} # type: ClassVar[Dict[str, RulesSet]]
validation_rules = {} # type: ClassVar[Schema]
def __init__(
self,
schema: Schema = None,
*,
allow_unknown: AllowUnknown = False,
error_handler: ErrorHandlerConfig = errors.BasicErrorHandler,
ignore_none_values: bool = False,
purge_unknown: bool = False,
purge_readonly: bool = False,
require_all: bool = False,
**extra_config: Any
):
self._config = extra_config # type: Dict[str, Any]
""" This dictionary holds the configuration arguments that were used to
initialize the :class:`Validator` instance except the ``error_handler``. """
self._config.update(
{
'allow_unknown': allow_unknown,
'error_handler': error_handler,
'ignore_none_values': ignore_none_values,
'purge_readonly': purge_readonly,
'purge_unknown': purge_unknown,
'require_all': require_all,
}
)
self.document = None # type: Optional[Document]
""" The document that is or was recently processed.
Type: any :term:`mapping` """
self._errors = errors.ErrorList()
""" The list of errors that were encountered since the last document
processing was invoked.
Type: :class:`~cerberus.errors.ErrorList` """
self.recent_error = None # type: Optional[errors.ValidationError]
""" The last individual error that was submitted.
Type: :class:`~cerberus.errors.ValidationError` or ``None`` """
self.document_error_tree = errors.DocumentErrorTree()
""" A tree representiation of encountered errors following the
structure of the document.
Type: :class:`~cerberus.errors.DocumentErrorTree` """
self.schema_error_tree = errors.SchemaErrorTree()
""" A tree representiation of encountered errors following the
structure of the schema.
Type: :class:`~cerberus.errors.SchemaErrorTree` """
self.document_path = () # type: DocumentPath
""" The path within the document to the current sub-document.
Type: :class:`tuple` """
self.schema_path = () # type: DocumentPath
""" The path within the schema to the current sub-schema.
Type: :class:`tuple` """
self.update = False
self.error_handler = self.__init_error_handler(error_handler)
""" The error handler used to format :attr:`~cerberus.Validator.errors`
and process submitted errors with
:meth:`~cerberus.Validator._error`.
Type: :class:`~cerberus.errors.BaseErrorHandler` """
self.schema = schema
self.allow_unknown = allow_unknown
self._remaining_rules = [] # type: List[str]
""" Keeps track of the rules that are next in line to be evaluated during the
validation of a field. Type: :class:`list` """
super().__init__()
@staticmethod
def __init_error_handler(config: ErrorHandlerConfig) -> errors.BaseErrorHandler:
if isinstance(config, errors.BaseErrorHandler):
return config
if isinstance(config, tuple):
error_handler, eh_config = config
else:
error_handler, eh_config = config, {}
if isinstance(error_handler, type) and issubclass(
error_handler, errors.BaseErrorHandler
):
return error_handler(**eh_config)
else:
raise RuntimeError('Invalid error_handler configuration.')
@classmethod
def clear_caches(cls):
""" Purge the cache of known valid schemas. """
cls._valid_schemas.clear()
def _error(self, *args):
""" Creates and adds one or multiple errors.
:param args: Accepts different argument's signatures.
*1. Bulk addition of errors:*
- :term:`iterable` of
:class:`~cerberus.errors.ValidationError`-instances
The errors will be added to
:attr:`~cerberus.Validator._errors`.
*2. Custom error:*
- the invalid field's name
- the error message
A custom error containing the message will be created and
added to :attr:`~cerberus.Validator._errors`.
There will however be fewer information contained in the
error (no reference to the violated rule and its
constraint).
*3. Defined error:*
- the invalid field's name
- the error-reference, see :mod:`cerberus.errors`
- arbitrary, supplemental information about the error
A :class:`~cerberus.errors.ValidationError` instance will
be created and added to
:attr:`~cerberus.Validator._errors`.
"""
if len(args) == 1:
self._errors.extend(args[0])
self._errors.sort()
for error in args[0]:
self.document_error_tree.add(error)
self.schema_error_tree.add(error)
self.error_handler.emit(error)
elif len(args) == 2 and isinstance(args[1], str):
self._error(args[0], errors.CUSTOM, args[1])
elif len(args) >= 2:
field = args[0]
code = args[1].code
rule = args[1].rule
info = args[2:]
document_path = self.document_path + (field,)
schema_path = self.schema_path
if code != errors.UNKNOWN_FIELD.code and rule is not None:
schema_path += (field, rule)
if not rule:
constraint = None
else:
field_definitions = self._resolve_rules_set(self.schema[field])
if rule == 'nullable':
constraint = field_definitions.get(rule, False)
elif rule == 'required':
constraint = field_definitions.get(rule, self.require_all)
if rule not in field_definitions:
schema_path = "__require_all__"
else:
constraint = field_definitions[rule]
value = self.document.get(field)
self.recent_error = errors.ValidationError(
document_path, schema_path, code, rule, constraint, value, info
)
self._error([self.recent_error])
def _get_child_validator(
self,
document_crumb: Union[FieldName, Iterable[FieldName], None] = None,
schema_crumb: Union[FieldName, Iterable[FieldName], None] = None,
**kwargs: Any
) -> 'UnconcernedValidator':
""" Creates a new instance of Validator-(sub-)class. All initial parameters of
the parent are passed to the initialization, unless a parameter is given as
an explicit *keyword*-parameter.
:param document_crumb: Extends the :attr:`~cerberus.Validator.document_path`
of the child-validator.
:param schema_crumb: Extends the :attr:`~cerberus.Validator.schema_path`
of the child-validator.
:param kwargs: Overriding keyword-arguments for initialization.
"""
child_config = ChainMap(kwargs, self._config)
if not self.is_child:
child_config = child_config.new_child(
{
'is_child': True,
'error_handler': toy_error_handler,
'root_allow_unknown': self.allow_unknown,
'root_document': self.document,
'root_schema': self.schema,
}
)
child_validator = self.__class__(**child_config)
if document_crumb is None:
child_validator.document_path = self.document_path
else:
if not isinstance(document_crumb, tuple):
document_crumb = (document_crumb,)
child_validator.document_path = self.document_path + document_crumb
if schema_crumb is None:
child_validator.schema_path = self.schema_path
else:
if not isinstance(schema_crumb, tuple):
schema_crumb = (schema_crumb,)
child_validator.schema_path = self.schema_path + schema_crumb
return child_validator
def __get_rule_handler(self, domain, rule):
methodname = '_{0}_{1}'.format(domain, rule.replace(' ', '_'))
result = getattr(self, methodname, None)
if result is None:
raise RuntimeError(
"There's no handler for '{}' in the '{}' "
"domain.".format(rule, domain)
)
return result
def _drop_nodes_from_errorpaths(
self,
_errors: errors.ErrorList,
dp_items: Iterable[int],
sp_items: Iterable[int],
) -> None:
""" Removes nodes by index from an errorpath, relatively to the
basepaths of self.
:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
"""
dp_basedepth = len(self.document_path)
sp_basedepth = len(self.schema_path)
for error in _errors:
for i in sorted(dp_items, reverse=True):
error.document_path = drop_item_from_tuple(
error.document_path, dp_basedepth + i
)
for i in sorted(sp_items, reverse=True):
error.schema_path = drop_item_from_tuple(
error.schema_path, sp_basedepth + i
)
if error.child_errors:
self._drop_nodes_from_errorpaths(error.child_errors, dp_items, sp_items)
def _lookup_field(self, path):
""" Searches for a field as defined by path. This method is used by the
``dependency`` evaluation logic.
:param path: Path elements are separated by a ``.``. A leading ``^``
indicates that the path relates to the document root,
otherwise it relates to the currently evaluated document,
which is possibly a subdocument.
The sequence ``^^`` at the start will be interpreted as a
literal ``^``.
:type path: :class:`str`
:returns: Either the found field name and its value or :obj:`None` for
both.
:rtype: A two-value :class:`tuple`.
"""
if path.startswith('^'):
path = path[1:]
context = self.document if path.startswith('^') else self.root_document
else:
context = self.document
parts = path.split('.')
for part in parts:
if part not in context:
return None, None
context = context.get(part, {})
return parts[-1], context
def _resolve_rules_set(self, rules_set):
if isinstance(rules_set, Mapping):
return rules_set
elif isinstance(rules_set, str):
return self.rules_set_registry.get(rules_set)
return None
def _resolve_schema(self, schema):
if isinstance(schema, Mapping):
return schema
elif isinstance(schema, str):
return self.schema_registry.get(schema)
return None
# Properties
@property
def allow_unknown(self) -> AllowUnknown:
""" If ``True`` unknown fields that are not defined in the schema will
be ignored. If a mapping with a validation schema is given, any
undefined field will be validated against its rules.
Also see :ref:`allowing-the-unknown`.
Type: :class:`bool` or any :term:`mapping` """
return self._config.get('allow_unknown', False)
@allow_unknown.setter
def allow_unknown(self, value: AllowUnknown) -> None:
if isinstance(value, Mapping):
self._config['allow_unknown'] = normalize_rulesset(value)
elif isinstance(value, bool):
self._config['allow_unknown'] = value
else:
raise TypeError
@property
def errors(self) -> Any:
""" The errors of the last processing formatted by the handler that is
bound to :attr:`~cerberus.Validator.error_handler`. """
return self.error_handler(self._errors)
@property
def ignore_none_values(self) -> bool:
""" Whether to not process :obj:`None`-values in a document or not.
Type: :class:`bool` """
return self._config.get('ignore_none_values', False)
@ignore_none_values.setter
def ignore_none_values(self, value: bool) -> None:
self._config['ignore_none_values'] = value
@property
def is_child(self) -> bool:
""" ``True`` for child-validators obtained with
:meth:`~cerberus.Validator._get_child_validator`.
Type: :class:`bool` """
return self._config.get('is_child', False)
@property
def _is_normalized(self) -> bool:
""" ``True`` if the document is already normalized. """
return self._config.get('_is_normalized', False)
@_is_normalized.setter
def _is_normalized(self, value: bool) -> None:
self._config['_is_normalized'] = value
@property
def purge_unknown(self) -> bool:
""" If ``True``, unknown fields will be deleted from the document
unless a validation is called with disabled normalization.
Also see :ref:`purging-unknown-fields`. Type: :class:`bool` """
return self._config.get('purge_unknown', False)
@purge_unknown.setter
def purge_unknown(self, value: bool) -> None:
self._config['purge_unknown'] = value
@property
def purge_readonly(self) -> bool:
""" If ``True``, fields declared as readonly will be deleted from the
document unless a validation is called with disabled normalization.
Type: :class:`bool` """
return self._config.get('purge_readonly', False)
@purge_readonly.setter
def purge_readonly(self, value: bool) -> None:
self._config['purge_readonly'] = value
@property
def require_all(self) -> bool:
""" If ``True`` known fields that are defined in the schema will
be required. Type: :class:`bool` """
return self._config.get('require_all', False)
@require_all.setter
def require_all(self, value: bool) -> None:
self._config['require_all'] = value
@property
def root_allow_unknown(self) -> AllowUnknown:
""" The :attr:`~cerberus.Validator.allow_unknown` attribute of the
first level ancestor of a child validator. """
return self._config.get('root_allow_unknown', self.allow_unknown)
@property
def root_require_all(self) -> bool:
""" The :attr:`~cerberus.Validator.require_all` attribute of
the first level ancestor of a child validator. """
return self._config.get('root_require_all', self.require_all)
@property
def root_document(self) -> Document:
""" The :attr:`~cerberus.Validator.document` attribute of the
first level ancestor of a child validator. """
return self._config.get('root_document', self.document)
@property
def rules_set_registry(self) -> RulesSetRegistry:
""" The registry that holds referenced rules sets.
Type: :class:`~cerberus.Registry` """
return self._config.get('rules_set_registry', rules_set_registry)
@rules_set_registry.setter
def rules_set_registry(self, registry: RulesSetRegistry) -> None:
self._config['rules_set_registry'] = registry
@property
def root_schema(self) -> Optional[Schema]:
""" The :attr:`~cerberus.Validator.schema` attribute of the
first level ancestor of a child validator. """
return self._config.get('root_schema', self.schema)
@property # type: ignore
def schema(self):
""" The validation schema of a validator. When a schema is passed to
a validator method (e.g. ``validate``), it replaces this attribute.
Type: any :term:`mapping` or :obj:`None` """
return self._schema
@schema.setter
def schema(self, schema):
if schema is None:
self._schema = None
elif self.is_child:
self._schema = schema
else:
self._schema = normalize_schema(schema)
@property
def schema_registry(self) -> SchemaRegistry:
""" The registry that holds referenced schemas.
Type: :class:`~cerberus.Registry` """
return self._config.get('schema_registry', schema_registry)
@schema_registry.setter
def schema_registry(self, registry: SchemaRegistry) -> None:
self._config['schema_registry'] = registry
# FIXME the returned method has the correct docstring, but doesn't appear
# in the API docs
@readonly_classproperty
def types(cls) -> Tuple[str, ...]:
""" The constraints that can be used for the 'type' rule.
Type: A tuple of strings. """
return tuple(cls.types_mapping)
# Document processing
def __init_processing(self, document, schema=None):
self._errors = errors.ErrorList()
self.recent_error = None
self.document_error_tree = errors.DocumentErrorTree()
self.schema_error_tree = errors.SchemaErrorTree()
self.document = copy(document)
if not self.is_child:
self._is_normalized = False
self.__init_schema(schema)
if self.schema is None:
if isinstance(self.allow_unknown, Mapping):
self.schema = {}
else:
raise SchemaError(errors.MISSING_SCHEMA)
if document is None:
raise DocumentError(errors.DOCUMENT_MISSING)
if not isinstance(document, Mapping):
raise DocumentError(errors.DOCUMENT_FORMAT.format(document))
self.error_handler.start(self)
def __init_schema(self, schema):
if schema is not None:
self.schema = schema
def _drop_remaining_rules(self, *rules):
""" Drops rules from the queue of the rules that still need to be
evaluated for the currently processed field.
If no arguments are given, the whole queue is emptied.
"""
if rules:
for rule in rules:
try:
self._remaining_rules.remove(rule)
except ValueError:
pass
else:
self._remaining_rules = []
# # Normalizing
def normalized(
self,
document: Document,
schema: Optional[Schema] = None,
always_return_document: bool = False,
) -> Optional[Document]:
"""
Returns the document normalized according to the specified rules of a schema.
:param document: The document to normalize.
:param schema: The validation schema. Defaults to :obj:`None`. If not
provided here, the schema must have been provided at
class instantiation.
:param always_return_document: Return the document, even if an error
occurred. Defaults to: ``False``.
:return: A normalized copy of the provided mapping or :obj:`None` if an
error occurred during normalization.
"""
self.__init_processing(document, schema)
self.__normalize_mapping(self.document, self.schema)
self.error_handler.end(self)
if self._errors and not always_return_document:
return None
else:
return self.document
def __normalize_mapping(self, mapping, schema):
if isinstance(schema, str):
schema = self._resolve_schema(schema)
schema = schema.copy()
for field in schema:
schema[field] = self._resolve_rules_set(schema[field])
self.__normalize_rename_fields(mapping, schema)
if self.purge_unknown and not self.allow_unknown:
self._normalize_purge_unknown(mapping, schema)
if self.purge_readonly:
self.__normalize_purge_readonly(mapping, schema)
# Check `readonly` fields before applying default values because
# a field's schema definition might contain both `readonly` and
# `default`.
self.__validate_readonly_fields(mapping, schema)
self.__normalize_default_fields(mapping, schema)
self._normalize_coerce(mapping, schema)
self.__normalize_containers(mapping, schema)
self._is_normalized = True
return mapping
def _normalize_coerce(self, mapping, schema):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'Iterable',
'itemsrules': {'oneof': [{'type': 'Callable'},
{'type': 'string'}]}},
{'type': 'string'}
]} """
error = errors.COERCION_FAILED
for field in mapping:
if field in schema and 'coerce' in schema[field]:
mapping[field] = self.__normalize_coerce(
schema[field]['coerce'],
field,
mapping[field],
schema[field].get('nullable', False),
error,
)
elif (
isinstance(self.allow_unknown, Mapping)
and 'coerce' in self.allow_unknown
):
mapping[field] = self.__normalize_coerce(
self.allow_unknown['coerce'],
field,
mapping[field],
self.allow_unknown.get('nullable', False),
error,
)
def __normalize_coerce(self, processor, field, value, nullable, error):
if isinstance(processor, str):
processor = self.__get_rule_handler('normalize_coerce', processor)
elif isinstance(processor, Iterable):
result = value
for p in processor:
result = self.__normalize_coerce(p, field, result, nullable, error)
if (
errors.COERCION_FAILED
in self.document_error_tree.fetch_errors_from(
self.document_path + (field,)
)
):
break
return result
try:
return processor(value)
except Exception as e:
if not (nullable and value is None):
self._error(field, error, str(e))
return value
def __normalize_containers(self, mapping, schema):
for field in mapping:
rules = set(schema.get(field, ()))
if isinstance(mapping[field], Mapping):
if 'keysrules' in rules:
self.__normalize_mapping_per_keysrules(
field, mapping, schema[field]['keysrules']
)
if 'valuesrules' in rules:
self.__normalize_mapping_per_valuesrules(
field, mapping, schema[field]['valuesrules']
)
if any(
x in rules for x in ('allow_unknown', 'purge_unknown', 'schema')
) or isinstance(self.allow_unknown, Mapping):
self.__normalize_mapping_per_schema(field, mapping, schema)
elif isinstance(mapping[field], str):
continue
elif isinstance(mapping[field], Sequence):
if 'itemsrules' in rules:
self.__normalize_sequence_per_itemsrules(field, mapping, schema)
elif 'items' in rules:
self.__normalize_sequence_per_items(field, mapping, schema)
def __normalize_mapping_per_keysrules(self, field, mapping, property_rules):
schema = {k: property_rules for k in mapping[field]}
document = {k: k for k in mapping[field]}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'keysrules'), schema=schema
)
result = validator.normalized(document, always_return_document=True)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
self._error(validator._errors)
for k in result:
if k == result[k]:
continue
if result[k] in mapping[field]:
warn(
"Normalizing keys of {path}: {key} already exists, "
"its value is replaced.".format(
path='.'.join(str(x) for x in self.document_path + (field,)),
key=k,
)
)
mapping[field][result[k]] = mapping[field][k]
else:
mapping[field][result[k]] = mapping[field][k]
del mapping[field][k]
def __normalize_mapping_per_valuesrules(self, field, mapping, value_rules):
schema = {k: value_rules for k in mapping[field]}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'valuesrules'), schema=schema
)
mapping[field] = validator.normalized(
mapping[field], always_return_document=True
)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
def __normalize_mapping_per_schema(self, field, mapping, schema):
rules = schema.get(field, {})
if not rules and isinstance(self.allow_unknown, Mapping):
rules = self.allow_unknown
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=rules.get('schema', {}),
allow_unknown=rules.get('allow_unknown', self.allow_unknown), # noqa: E501
purge_unknown=rules.get('purge_unknown', self.purge_unknown),
require_all=rules.get('require_all', self.require_all),
) # noqa: E501
value_type = type(mapping[field])
result_value = validator.normalized(mapping[field], always_return_document=True)
mapping[field] = value_type(result_value)
if validator._errors:
self._error(validator._errors)
def __normalize_sequence_per_items(self, field, mapping, schema):
rules, values = schema[field]['items'], mapping[field]
if len(rules) != len(values):
return
schema = {k: v for k, v in enumerate(rules)}
document = {k: v for k, v in enumerate(values)}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'items'), schema=schema
)
value_type = type(mapping[field])
result = validator.normalized(document, always_return_document=True)
mapping[field] = value_type(result.values())
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
def __normalize_sequence_per_itemsrules(self, field, mapping, schema):
schema = {k: schema[field]['itemsrules'] for k in range(len(mapping[field]))}
document = {k: v for k, v in enumerate(mapping[field])}
validator = self._get_child_validator(
document_crumb=field, schema_crumb=(field, 'itemsrules'), schema=schema
)
value_type = type(mapping[field])
result = validator.normalized(document, always_return_document=True)
mapping[field] = value_type(result.values())
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(validator._errors)
@staticmethod
def __normalize_purge_readonly(mapping, schema):
for field in [x for x in mapping if schema.get(x, {}).get('readonly', False)]:
mapping.pop(field)
return mapping
@staticmethod
def _normalize_purge_unknown(mapping, schema):
""" {'type': 'boolean'} """
for field in [x for x in mapping if x not in schema]:
mapping.pop(field)
return mapping
def __normalize_rename_fields(self, mapping, schema):
for field in tuple(mapping):
if field in schema:
self._normalize_rename(mapping, schema, field)
self._normalize_rename_handler(mapping, schema, field)
elif (
isinstance(self.allow_unknown, Mapping)
and 'rename_handler' in self.allow_unknown
):
self._normalize_rename_handler(
mapping, {field: self.allow_unknown}, field
)
return mapping
def _normalize_rename(self, mapping, schema, field):
""" {'type': 'Hashable'} """
if 'rename' in schema[field]:
mapping[schema[field]['rename']] = mapping[field]
del mapping[field]
def _normalize_rename_handler(self, mapping, schema, field):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'Iterable',
'itemsrules': {'oneof': [{'type': 'Callable'},
{'type': 'string'}]}},
{'type': 'string'}
]} """
if 'rename_handler' not in schema[field]:
return
new_name = self.__normalize_coerce(
schema[field]['rename_handler'], field, field, False, errors.RENAMING_FAILED
)
if new_name != field:
mapping[new_name] = mapping[field]
del mapping[field]
def __validate_readonly_fields(self, mapping, schema):
for field in (
x
for x in schema
if x in mapping and self._resolve_rules_set(schema[x]).get('readonly')
):
self._validate_readonly(schema[field]['readonly'], field, mapping[field])
def __normalize_default_fields(self, mapping, schema):
empty_fields = [
x
for x in schema
if x not in mapping
or (
mapping[x] is None # noqa: W503
and not schema[x].get('nullable', False)
) # noqa: W503
]
fields_with_default = [x for x in empty_fields if 'default' in schema[x]]
for field in fields_with_default:
self._normalize_default(mapping, schema, field)
known_fields_states = set()
fields_with_default_setter = [
x for x in empty_fields if 'default_setter' in schema[x]
]
while fields_with_default_setter:
field = fields_with_default_setter.pop(0)
try:
self._normalize_default_setter(mapping, schema, field)
except KeyError:
fields_with_default_setter.append(field)
except Exception as e:
self._error(field, errors.SETTING_DEFAULT_FAILED, str(e))
fields_processing_state = hash(tuple(fields_with_default_setter))
if fields_processing_state in known_fields_states:
for field in fields_with_default_setter:
self._error(
field,
errors.SETTING_DEFAULT_FAILED,
'Circular dependencies of default setters.',
)
break
else:
known_fields_states.add(fields_processing_state)
def _normalize_default(self, mapping, schema, field):
""" {'nullable': True} """
mapping[field] = schema[field]['default']
def _normalize_default_setter(self, mapping, schema, field):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'string'}
]} """
if 'default_setter' in schema[field]:
setter = schema[field]['default_setter']
if isinstance(setter, str):
setter = self.__get_rule_handler('normalize_default_setter', setter)
mapping[field] = setter(mapping)
# # Validating
def validate(
self,
document: Document,
schema: Optional[Schema] = None,
update: bool = False,
normalize: bool = True,
) -> bool:
"""
Normalizes and validates a mapping against a validation-schema of defined rules.
:param document: The document to normalize.
:param schema: The validation schema. Defaults to :obj:`None`. If not provided
here, the schema must have been provided at class instantiation.
:param update: If ``True``, required fields won't be checked.
:param normalize: If ``True``, normalize the document before validation.
:return: ``True`` if validation succeeds, otherwise ``False``. Check
the :func:`errors` property for a list of processing errors.
"""
self.update = update
self._unrequired_by_excludes = set() # type: Set[FieldName]
self.__init_processing(document, schema)
if normalize:
self.__normalize_mapping(self.document, self.schema)
for field in self.document: # type: ignore
if self.ignore_none_values and self.document[field] is None: # type: ignore
continue
definitions = self.schema.get(field) # type: ignore
if definitions is not None:
self.__validate_definitions(definitions, field)
else:
self.__validate_unknown_fields(field)
if not self.update:
self.__validate_required_fields(self.document)
self.error_handler.end(self)
return not bool(self._errors)
__call__ = validate
def validated(
self,
document: Document,
schema: Optional[Schema] = None,
update: bool = False,
normalize: bool = True,
always_return_document: bool = False,
) -> Optional[Document]:
"""
Wrapper around :meth:`~cerberus.Validator.validate` that returns the normalized
and validated document or :obj:`None` if validation failed.
"""
self.validate(
document=document, schema=schema, update=update, normalize=normalize
)
if self._errors and not always_return_document:
return None
else:
return self.document
def __validate_unknown_fields(self, field):
if self.allow_unknown:
value = self.document[field]
if isinstance(self.allow_unknown, (Mapping, str)):
# validate that unknown fields matches the schema
# for unknown_fields
schema_crumb = 'allow_unknown' if self.is_child else '__allow_unknown__'
validator = self._get_child_validator(
schema_crumb=schema_crumb, schema={field: self.allow_unknown}
)
if not validator({field: value}, normalize=False):
self._error(validator._errors)
else:
self._error(field, errors.UNKNOWN_FIELD)
def __validate_definitions(self, definitions, field):
""" Validate a field's value against its defined rules. """
definitions = self._resolve_rules_set(definitions)
value = self.document[field]
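        # Build the queue of rules to evaluate for this field: priority rules first,
        # then any remaining mandatory rules, then every other rule in the definition
        # (normalization rules and a few structural rules are handled elsewhere).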
rules_queue = [
x
for x in self.priority_validations
if x in definitions or x in self.mandatory_validations
]
rules_queue.extend(
x for x in self.mandatory_validations if x not in rules_queue
)
rules_queue.extend(
x
for x in definitions
if x not in rules_queue
and x not in self.normalization_rules
and x not in ('allow_unknown', 'require_all', 'meta', 'required')
)
self._remaining_rules = rules_queue
while self._remaining_rules:
rule = self._remaining_rules.pop(0)
rule_handler = self.__get_rule_handler('validate', rule)
rule_handler(definitions.get(rule, None), field, value)
# Remember to keep the validation methods below this line
# sorted alphabetically
_validate_allow_unknown = dummy_for_rule_validation(
""" {'oneof': [{'type': 'boolean'},
{'type': ['dict', 'string'],
'check_with': 'rulesset'}]} """
)
def _validate_allowed(self, allowed_values, field, value):
""" {'type': 'container_but_not_string'} """
if isinstance(value, Iterable) and not isinstance(value, str):
unallowed = tuple(x for x in value if x not in allowed_values)
if unallowed:
self._error(field, errors.UNALLOWED_VALUES, unallowed)
else:
if value not in allowed_values:
self._error(field, errors.UNALLOWED_VALUE, value)
def _validate_check_with(self, checks, field, value):
""" {'oneof': [
{'type': 'Callable'},
{'type': 'Iterable',
'itemsrules': {'oneof': [{'type': 'Callable'},
{'type': 'string'}]}},
{'type': 'string'}
]}
"""
if isinstance(checks, str):
value_checker = self.__get_rule_handler('check_with', checks)
value_checker(field, value)
elif isinstance(checks, Iterable):
for v in checks:
self._validate_check_with(v, field, value)
else:
checks(field, value, self._error)
def _validate_contains(self, expected_values, field, value):
""" {'empty': False } """
if not isinstance(value, Container):
return
if not isinstance(expected_values, Iterable) or isinstance(
expected_values, str
):
expected_values = set((expected_values,))
else:
expected_values = set(expected_values)
missing_values = expected_values - set(value)
if missing_values:
self._error(field, errors.MISSING_MEMBERS, missing_values)
def _validate_dependencies(self, dependencies, field, value):
""" {'type': ('Hashable', 'Iterable', 'Mapping'),
'check_with': 'dependencies'} """
if isinstance(dependencies, str):
dependencies = (dependencies,)
if isinstance(dependencies, Sequence):
self.__validate_dependencies_sequence(dependencies, field)
elif isinstance(dependencies, Mapping):
self.__validate_dependencies_mapping(dependencies, field)
if (
self.document_error_tree.fetch_node_from(
self.schema_path + (field, 'dependencies')
)
is not None
):
return True
def __validate_dependencies_mapping(self, dependencies, field):
validated_dependencies_counter = 0
error_info = {}
for dependency_name, dependency_values in dependencies.items():
if not isinstance(dependency_values, Sequence) or isinstance(
dependency_values, str
):
dependency_values = [dependency_values]
wanted_field, wanted_field_value = self._lookup_field(dependency_name)
if wanted_field_value in dependency_values:
validated_dependencies_counter += 1
else:
error_info.update({dependency_name: wanted_field_value})
if validated_dependencies_counter != len(dependencies):
self._error(field, errors.DEPENDENCIES_FIELD_VALUE, error_info)
def __validate_dependencies_sequence(self, dependencies, field):
for dependency in dependencies:
if self._lookup_field(dependency)[0] is None:
self._error(field, errors.DEPENDENCIES_FIELD, dependency)
def _validate_empty(self, empty, field, value):
""" {'type': 'boolean'} """
if isinstance(value, Sized) and len(value) == 0:
self._drop_remaining_rules(
'allowed',
'forbidden',
'items',
'minlength',
'maxlength',
'regex',
'check_with',
)
if not empty:
self._error(field, errors.EMPTY)
def _validate_excludes(self, excluded_fields, field, value):
""" {'type': ('Hashable', 'Iterable'),
'itemsrules': {'type': 'Hashable'}} """
if isinstance(excluded_fields, str) or not isinstance(
excluded_fields, Container
):
excluded_fields = (excluded_fields,)
        # Mark the currently evaluated field as not required for now if it actually is.
        # One of the fields marked this way must be present when required fields are checked.
if self.schema[field].get('required', self.require_all):
self._unrequired_by_excludes.add(field)
for excluded_field in excluded_fields:
if excluded_field in self.schema and self.schema[field].get(
'required', self.require_all
):
self._unrequired_by_excludes.add(excluded_field)
if any(excluded_field in self.document for excluded_field in excluded_fields):
exclusion_str = ', '.join(
"'{0}'".format(field) for field in excluded_fields
)
self._error(field, errors.EXCLUDES_FIELD, exclusion_str)
def _validate_forbidden(self, forbidden_values, field, value):
""" {'type': 'Container'} """
if isinstance(value, str):
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value)
elif isinstance(value, Sequence):
forbidden = set(value) & set(forbidden_values)
if forbidden:
self._error(field, errors.FORBIDDEN_VALUES, list(forbidden))
else:
if value in forbidden_values:
self._error(field, errors.FORBIDDEN_VALUE, value)
def _validate_items(self, items, field, values):
""" {'type': 'Sequence', 'check_with': 'items'} """
if len(items) != len(values):
self._error(field, errors.ITEMS_LENGTH, len(items), len(values))
else:
schema = {i: definition for i, definition in enumerate(items)}
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'items'), # noqa: E501
schema=schema,
)
if not validator(
{i: value for i, value in enumerate(values)},
update=self.update,
normalize=False,
):
self._error(field, errors.ITEMS, validator._errors)
def _validate_itemsrules(self, rulesset, field, value):
""" {'type': ('dict', 'string'),
'check_with': 'rulesset'} """
if not isinstance(value, Sequence):
return
schema = {i: rulesset for i in range(len(value))}
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'itemsrules'),
schema=schema,
allow_unknown=self.allow_unknown,
)
validator(
{i: v for i, v in enumerate(value)}, update=self.update, normalize=False
)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(field, errors.ITEMSRULES, validator._errors)
def __validate_logical(self, operator, definitions, field, value):
""" Validates value against all definitions and logs errors according
to the operator. """
valid_counter = 0
_errors = errors.ErrorList()
for i, definition in enumerate(definitions):
schema = {field: definition.copy()}
for rule in ('allow_unknown', 'type'):
if rule not in schema[field] and rule in self.schema[field]:
schema[field][rule] = self.schema[field][rule]
if 'allow_unknown' not in schema[field]:
schema[field]['allow_unknown'] = self.allow_unknown
validator = self._get_child_validator(
schema_crumb=(field, operator, i), schema=schema, allow_unknown=True
)
if validator(self.document, update=self.update, normalize=False):
valid_counter += 1
else:
self._drop_nodes_from_errorpaths(validator._errors, [], [3])
_errors.extend(validator._errors)
return valid_counter, _errors
def _validate_anyof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'anyof'} """
valids, _errors = self.__validate_logical('anyof', definitions, field, value)
if valids < 1:
self._error(field, errors.ANYOF, _errors, valids, len(definitions))
def _validate_allof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'allof'} """
valids, _errors = self.__validate_logical('allof', definitions, field, value)
if valids < len(definitions):
self._error(field, errors.ALLOF, _errors, valids, len(definitions))
def _validate_noneof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'noneof'} """
valids, _errors = self.__validate_logical('noneof', definitions, field, value)
if valids > 0:
self._error(field, errors.NONEOF, _errors, valids, len(definitions))
def _validate_oneof(self, definitions, field, value):
""" {'type': 'Sequence', 'logical': 'oneof'} """
valids, _errors = self.__validate_logical('oneof', definitions, field, value)
if valids != 1:
self._error(field, errors.ONEOF, _errors, valids, len(definitions))
def _validate_max(self, max_value, field, value):
""" {'nullable': False } """
try:
if value > max_value:
self._error(field, errors.MAX_VALUE)
except TypeError:
pass
def _validate_min(self, min_value, field, value):
""" {'nullable': False } """
try:
if value < min_value:
self._error(field, errors.MIN_VALUE)
except TypeError:
pass
def _validate_maxlength(self, max_length, field, value):
""" {'type': 'integer'} """
if isinstance(value, Iterable) and len(value) > max_length:
self._error(field, errors.MAX_LENGTH, len(value))
_validate_meta = dummy_for_rule_validation('')
def _validate_minlength(self, min_length, field, value):
""" {'type': 'integer'} """
if isinstance(value, Iterable) and len(value) < min_length:
self._error(field, errors.MIN_LENGTH, len(value))
def _validate_nullable(self, nullable, field, value):
""" {'type': 'boolean'} """
if value is None:
if not nullable:
self._error(field, errors.NULLABLE)
self._drop_remaining_rules(
'allowed',
'empty',
'forbidden',
'items',
'keysrules',
'min',
'max',
'minlength',
'maxlength',
'regex',
'schema',
'type',
'valuesrules',
)
def _validate_keysrules(self, schema, field, value):
""" {'type': ('Mapping', 'string'), 'check_with': 'rulesset',
'forbidden': ('rename', 'rename_handler')} """
if isinstance(value, Mapping):
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'keysrules'),
schema={k: schema for k in value.keys()},
)
if not validator({k: k for k in value.keys()}, normalize=False):
self._drop_nodes_from_errorpaths(validator._errors, [], [2, 4])
self._error(field, errors.KEYSRULES, validator._errors)
def _validate_readonly(self, readonly, field, value):
""" {'type': 'boolean'} """
if readonly:
if not self._is_normalized:
self._error(field, errors.READONLY_FIELD)
            # If the document was normalized (and has therefore already been
            # checked for readonly fields), the remaining rules still have to be
            # dropped when an error was filed for this field during normalization.
has_error = (
errors.READONLY_FIELD
in self.document_error_tree.fetch_errors_from(
self.document_path + (field,)
)
)
if self._is_normalized and has_error:
self._drop_remaining_rules()
def _validate_regex(self, pattern, field, value):
""" {'type': 'string'} """
if not isinstance(value, str):
return
if not pattern.endswith('$'):
pattern += '$'
re_obj = re.compile(pattern)
if not re_obj.match(value):
self._error(field, errors.REGEX_MISMATCH)
_validate_required = dummy_for_rule_validation(""" {'type': 'boolean'} """)
_validate_require_all = dummy_for_rule_validation(""" {'type': 'boolean'} """)
def __validate_required_fields(self, document):
""" Validates that required fields are not missing.
:param document: The document being validated.
"""
required = set(
field
for field, definition in self.schema.items()
if self._resolve_rules_set(definition).get('required', self.require_all)
is True
)
required -= self._unrequired_by_excludes
missing = required - set(
field
for field in document
if document.get(field) is not None or not self.ignore_none_values
)
for field in missing:
self._error(field, errors.REQUIRED_FIELD)
# At least one field from self._unrequired_by_excludes should be present in
# document.
if self._unrequired_by_excludes:
fields = set(field for field in document if document.get(field) is not None)
if self._unrequired_by_excludes.isdisjoint(fields):
for field in self._unrequired_by_excludes - fields:
self._error(field, errors.REQUIRED_FIELD)
def _validate_schema(self, schema, field, value):
""" {'type': ('Mapping', 'string'),
'check_with': 'schema'} """
if not isinstance(value, Mapping):
return
schema = self._resolve_schema(schema)
allow_unknown = self.schema[field].get('allow_unknown', self.allow_unknown)
require_all = self.schema[field].get('require_all', self.require_all)
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=(field, 'schema'),
schema=schema,
allow_unknown=allow_unknown,
require_all=require_all,
)
if not validator(value, update=self.update, normalize=False):
self._error(field, errors.SCHEMA, validator._errors)
def _validate_type(self, data_type, field, value):
""" {'type': 'tuple',
'itemsrules': {
'oneof': (
{'type': 'string', 'check_with': 'type_names'},
{'type': ('type', 'generic_type_alias')}
)}} """
if not data_type:
return
for _type in data_type:
if isinstance(_type, str):
type_definition = self.types_mapping[_type]
if isinstance(value, type_definition.included_types) and not isinstance(
value, type_definition.excluded_types
):
return
else:
if isinstance(value, _type):
return
self._error(field, errors.TYPE)
self._drop_remaining_rules()
def _validate_valuesrules(self, schema, field, value):
""" {'type': ['dict', 'string'], 'check_with': 'rulesset',
'forbidden': ['rename', 'rename_handler']} """
schema_crumb = (field, 'valuesrules')
if isinstance(value, Mapping):
validator = self._get_child_validator(
document_crumb=field,
schema_crumb=schema_crumb,
schema={k: schema for k in value},
)
validator(value, update=self.update, normalize=False)
if validator._errors:
self._drop_nodes_from_errorpaths(validator._errors, [], [2])
self._error(field, errors.VALUESRULES, validator._errors)
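# Illustrative usage sketch (comment only; the class name ``Validator`` is an
# assumption, since these rule methods normally back a cerberus-style validator):
#
#     schema = {'name': {'type': 'string', 'required': True},
#               'age': {'type': 'integer', 'min': 0, 'default': 0}}
#     v = Validator(schema)
#     v.validate({'name': 'Ada'})   # True; normalization fills in {'age': 0}
#     v.validate({'age': -1})       # False; v.errors reports 'min' and the missing 'name'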
| 38.144324 | 88 | 0.588051 |
54f9b0d71de0d522ab7a639e1d30bb456c31c463 | 1,098 | py | Python | countries/income/HIC.py | vincihb/paper_database | f97ebdcc2bba3ecee3590cde12a5a5f71d26451d | ["MIT"] | null | null | null | countries/income/HIC.py | vincihb/paper_database | f97ebdcc2bba3ecee3590cde12a5a5f71d26451d | ["MIT"] | null | null | null | countries/income/HIC.py | vincihb/paper_database | f97ebdcc2bba3ecee3590cde12a5a5f71d26451d | ["MIT"] | null | null | null |
from countries.Countries import Countries
class HIC(Countries):
def __init__(self):
super().__init__()
self.lst_of_countries = ['Australia', 'Austria', 'Bahrain', 'Belgium', 'Brunei', 'Canada', 'Chile', 'Croatia', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Israel', 'Italy', 'Japan', 'Latvia', 'Lithuania', 'Luxembourg', 'Netherlands', 'New Zealand', 'Norway', 'Oman', 'Poland', 'Portugal', 'Puerto Rico', 'Saudi Arabia', 'Singapore', 'Slovakia', 'Slovenia', 'South Korea', 'Spain', 'Sweden', 'Switzerland', 'Taiwan', 'Trinidad and Tobago', 'United Arab Emirates', 'United Kingdom', 'United States']
self.all_papers = self.get_all_papers()
if __name__ == "__main__":
a = HIC()
papers = a.all_papers
print(len(papers))
print(dict(sorted(a.get_themes_distribution().items(), key=lambda x: x[1], reverse=True)))
print(a.get_watercode_distribution())
# i = 0
# for paper in papers:
# if i == 20:
# break
# i = i + 1
# print(paper)
| 43.92 | 577 | 0.623862 |
7868d8ee0cf143890d1b9f53eacf1d8f485a7fe9 | 31,197 | py | Python | nlplingo/tasks/train_test.sandbox.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | ["Apache-2.0"] | 3 | 2020-10-22T13:28:00.000Z | 2022-03-24T19:57:22.000Z | nlplingo/tasks/train_test.sandbox.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | ["Apache-2.0"] | null | null | null | nlplingo/tasks/train_test.sandbox.py | BBN-E/nlplingo | 32ff17b1320937faa3d3ebe727032f4b3e7a353d | ["Apache-2.0"] | 1 | 2020-10-22T13:29:51.000Z | 2020-10-22T13:29:51.000Z |
import sys
import argparse
import json
import logging
import numpy as np
import torch
import torch.nn as nn
from torch import nn, optim, cuda
from torch.utils.data import TensorDataset, DataLoader
import torch.nn.functional as F
from nlplingo.tasks.event_domain import EventDomain
from nlplingo.nn.extractor import Extractor
from nlplingo.annotation.ingestion import prepare_docs
from nlplingo.tasks.eventtrigger.generator import EventTriggerExampleGenerator
from nlplingo.tasks.eventtrigger.run import generate_trigger_data_feature
from nlplingo.embeddings.word_embeddings import load_embeddings
from nlplingo.common.scoring import evaluate_f1
from nlplingo.common.scoring import print_score_breakdown
from nlplingo.common.scoring import write_score_to_file
from nlplingo.tasks.eventtrigger.run import get_predicted_positive_triggers
logger = logging.getLogger(__name__)
class Net(nn.Module):
def __init__(self, input_size, output_size):
super(Net, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.l1 = nn.Linear(input_size, 768)
self.l2 = nn.Linear(768, 768)
self.l3 = nn.Linear(768, output_size)
def forward(self, x):
"""
We need to flatten before we give `x` to the fully connected layer. So we tell PyTorch to reshape the tensor.
`x.view(-1, self.input_size)` tells PyTorch to use `input_size` as the number of columns,
but decide the number of rows by itself.
NOTE: `view` shares the underlying data with the original tensor, so it is really a view into the old tensor
instead of creating a brand new one
We return logits, i.e. we did not apply activation
"""
x = x.view(-1, self.input_size)
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
return self.l3(x) # return logits
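# A minimal shape-check sketch for the feed-forward ``Net`` above.  The sizes used
# here (batch of 4, 3072 input features, 34 classes) are illustrative assumptions,
# not values taken from any nlplingo configuration.
def _demo_net_shapes():
    net = Net(input_size=3072, output_size=34)
    x = torch.randn(4, 3072)        # (batch-size, input-size)
    logits = net(x)                 # forward() flattens the input and returns raw logits
    assert logits.shape == (4, 34)
    return logits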
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def train_trigger_multilayer(params, word_embeddings, trigger_extractor):
"""
:type params: dict
:type word_embeddings: dict[str:nlplingo.embeddings.word_embeddings.WordEmbedding]
:type trigger_extractor: nlplingo.nn.extractor.Extractor
#### data_list
This is generated by for example the `generate_trigger_data_feature()` method.
data_list.shape = (#features, #examples, #dimensions-for-this-feature)
For instance, if you have the following piece of code where each example has 2 features: 'trigger' and 'arg':
```
data = defaultdict(list)
x1_trigger = [0.1, 0.2, 0.22]
x2_trigger = [0.3, 0.4, 0.44]
x1_arg = [0.5, 0.6, 0.66]
x2_arg = [0.7, 0.8, 0.88]
data['trigger'].append(x1_trigger)
data['trigger'].append(x2_trigger)
data['arg'].append(x1_arg)
data['arg'].append(x2_arg)
data_list = [np.asarray(data[k]) for k in data]
```
Then doing `torch.from_numpy(np.array(data_list)).shape` gives `torch.Size([2, 2, 3])`
#### train_label
This is of shape (#examples, #labels)
So e.g. when you do `torch.from_numpy(np.array(train_label))`, and you get:
```
tensor([[-2.0, 0.1],
[ 0.7, 0.4],
[-1.5, -1.6]])
```
The above is for #examples=3 and #labels=2.
Then when you do `torch.from_numpy(np.array(train_label)).max(1)`, you get a tuple:
```
torch.return_types.max(
values=tensor([ 0.1, 0.7, -1.5]),
indices=tensor([1, 0, 0]))
```
And we can do `torch.from_numpy(np.array(train_label)).max(1)[1]` to get the indices `tensor([1, 0, 0])`
"""
feature_generator = trigger_extractor.feature_generator
""":type: nlplingo.event.trigger.feature.EventTriggerFeatureGenerator"""
example_generator = trigger_extractor.example_generator
""":type: nlplingo.event.trigger.generator.EventTriggerExampleGenerator"""
trigger_model = trigger_extractor.extraction_model
""":type: nlplingo.nn.trigger_model.TriggerModel"""
# logger.debug('type(feature_generator)={}'.format(type(feature_generator)))
# logger.debug('type(example_generator)={}'.format(type(example_generator)))
# logger.debug('type(trigger_model)={}'.format(type(trigger_model)))
# prepare dataset for sample generation
logger.info("Preparing docs")
train_docs = prepare_docs(params['data']['train']['filelist'], word_embeddings, params)
dev_docs = prepare_docs(params['data']['dev']['filelist'], word_embeddings, params)
logger.info("Applying domain")
for doc in train_docs + dev_docs:
doc.apply_domain(trigger_extractor.domain)
(train_examples, train_data, train_data_list, train_label) = (
generate_trigger_data_feature(example_generator, train_docs, feature_generator))
(dev_examples, dev_data, dev_data_list, dev_label) = (
generate_trigger_data_feature(example_generator, dev_docs, feature_generator))
"""
squeeze() returns a tensor with all dimensions of size 1 removed
Since `train_data_list` shape is: (#features, #examples, #dimensions-in-this-feature).
If we are just using a single feature (trigger window),
then after `squeeze()` we will be left with (#examples, #dimensions-in-this-feature), which is what is needed.
CAUTION: if you are using 2 features, e.g. (trigger_window, argument_window), then you need to further manipulate
`train_data_list` to be a 2-dimensional matrix of : (#examples, #features X #feature-dimensions)
"""
train_data = TensorDataset(torch.from_numpy(np.array(train_data_list)).squeeze(),
torch.from_numpy(np.array(train_label)).max(1)[1])
dev_data = TensorDataset(torch.from_numpy(np.array(dev_data_list)).squeeze(),
torch.from_numpy(np.array(dev_label)).max(1)[1])
input_size = len(np.array(train_data_list).squeeze()[0]) # number of input features in each example, e.g. 3072
output_size = len(dev_label[0]) # number of output labels/classes, e.g. 33
print('input_size=%d output_size=%d' % (input_size, output_size))
train_loader = DataLoader(train_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=True)
dev_loader = DataLoader(dev_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=False)
# ========= training
model = Net(input_size, output_size)
model.to(device)
    criterion = nn.CrossEntropyLoss()  # applies log-softmax to the logits, then negative log-likelihood of the target class
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
for epoch in range(trigger_extractor.hyper_parameters.epoch):
model.train()
for batch_index, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if (batch_index % 100) == 0:
N = len(train_loader.dataset)
N_seen = batch_index * len(data)
batch_proportion = 100. * batch_index / len(train_loader)
print('Train epoch {} | Batch status: {}/{} ({:.0f}%) | Loss: {:.6f}'.format(epoch, N_seen, N,
batch_proportion,
loss.item()))
# saving model file
torch.save(model.state_dict(), trigger_extractor.model_file)
# model.load_state_dict(torch.load(trigger_extractor.model_file))
predicted_positive_dev_triggers = evaluate(model, dev_loader, criterion, dev_examples, dev_label, trigger_extractor,
params['train.score_file'])
# if 'test' in params['data']:
# test_docs = prepare_docs(params['data']['test']['filelist'], word_embeddings, params)
#
# for doc in test_docs:
# doc.apply_domain(trigger_extractor.domain)
#
# # Generate data
# (test_examples, test_data, test_data_list, test_label) = (
# generate_trigger_data_feature(trigger_extractor.example_generator, test_docs, trigger_extractor.feature_generator))
#
# test_data = TensorDataset(torch.from_numpy(np.array(test_data_list)).squeeze(),
# torch.from_numpy(np.array(test_label)).max(1)[1])
# test_loader = DataLoader(test_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=False)
#
# predicted_positive_test_triggers = evaluate_lstm(model, test_loader, criterion, test_examples, test_label, trigger_extractor, params['test.score_file'])
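# A small, self-contained sketch of the two tensor manipulations described in the
# docstrings above: ``squeeze()`` on the stacked single-feature list and
# ``.max(1)[1]`` to turn per-class label scores into class indices.  The toy shapes
# are assumptions for illustration only.
def _demo_label_and_feature_prep():
    # one feature, 3 examples, 4 dimensions per example: squeeze() drops the leading axis
    data_list = [np.zeros((3, 4), dtype=np.float32)]
    features = torch.from_numpy(np.array(data_list)).squeeze()    # shape (3, 4)
    # 3 examples, 2 label columns: max(1)[1] yields the argmax index per row
    labels = torch.from_numpy(np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]]))
    label_indices = labels.max(1)[1]                              # tensor([1, 0, 1])
    return features.shape, label_indices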
def test_trigger_multilayer(params, word_embeddings, trigger_extractor):
"""
:type params: dict
:type word_embeddings: nlplingo.embeddings.WordEmbedding
:type trigger_extractor: nlplingo.nn.extractor.Extractor
"""
test_docs = prepare_docs(params['data']['test']['filelist'], word_embeddings, params)
for doc in test_docs:
doc.apply_domain(trigger_extractor.domain)
feature_generator = trigger_extractor.feature_generator
""":type: nlplingo.event.trigger.feature.EventTriggerFeatureGenerator"""
example_generator = trigger_extractor.example_generator
""":type: nlplingo.event.trigger.generator.EventTriggerExampleGenerator"""
trigger_model = trigger_extractor.extraction_model
""":type: nlplingo.nn.trigger_model.TriggerModel"""
# Generate data
(test_examples, test_data, test_data_list, test_label) = (generate_trigger_data_feature(example_generator, test_docs, feature_generator))
test_data = TensorDataset(torch.from_numpy(np.array(test_data_list)).squeeze(), torch.from_numpy(np.array(test_label)).max(1)[1])
test_loader = DataLoader(test_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=False)
input_size = len(np.array(test_data_list).squeeze()[0]) # number of input features in each example, e.g. 3072
output_size = len(test_label[0]) # number of output labels/classes, e.g. 33
print('input_size=%d output_size=%d' % (input_size, output_size))
model = Net(input_size, output_size)
model.to(device)
model.load_state_dict(torch.load(trigger_extractor.model_file))
    criterion = nn.CrossEntropyLoss()  # applies log-softmax to the logits, then negative log-likelihood of the target class
predicted_positive_test_triggers = evaluate(model, test_loader, criterion, test_examples, test_label, trigger_extractor,
params['test.score_file'])
class LstmNet(nn.Module):
def __init__(self, output_size, input_size, hidden_dim, n_layers, seq_len, drop_prob=0.5, bidirectional=False):
super(LstmNet, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.seq_len = seq_len
self.bidirectional = bidirectional
#self.embedding = nn.Embedding(vocab_size, embedding_dim)
# input_size hidden_size
#self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, dropout=drop_prob, batch_first=True, bidirectional=False)
self.lstm = nn.LSTM(input_size, hidden_dim, n_layers, batch_first=True, bidirectional=self.bidirectional)
self.dropout = nn.Dropout(0.2)
if self.bidirectional:
self.fc1 = nn.Linear(hidden_dim*2, hidden_dim*2)
else:
self.fc1 = nn.Linear(hidden_dim, hidden_dim)
self.init_weights(self.fc1)
if self.bidirectional:
self.fc2 = nn.Linear(hidden_dim*2, hidden_dim)
else:
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.init_weights(self.fc2)
self.fc3 = nn.Linear(hidden_dim, output_size)
self.init_weights(self.fc3)
self.sigmoid = nn.Sigmoid()
def forward(self, x, h):
# x.shape # => torch.Size([100, 50, 3072]) (batch-size, seq-len, input-size)
# h[0].shape # => torch.Size([1, 100, 768]) (num-layers, batch-size, hidden-size)
# h[1].shape # => torch.Size([1, 100, 768]) (num-layers, batch-size, hidden-size)
batch_size = x.size(0)
# you can also do the following
# ```
# h0 = torch.zeros(self.n_layers * 1, x.size(0), self.hidden_dim).to(device) # 2 for bidirection
# c0 = torch.zeros(self.n_layers * 1, x.size(0), self.hidden_dim).to(device)
# lstm_out, hidden = self.lstm(x, (h0, c0))
# ```
lstm_out, hidden = self.lstm(x, h)
"""
lstm_out.shape # => torch.Size([100, 50, 768]) , (batch-size, seq-len, hidden-size)
len(hidden) # => 2
hidden[0].shape # => torch.Size([1, 100, 768]) , final hidden state
hidden[1].shape # => torch.Size([1, 100, 768]) , final cell state
NOTE: lstm_out[5][-1] == hidden[0][0][5]
What this means is that lstm_out gives the hidden-state of (all examples in batch, every time sequence)
And lstm_out[5][-1] gets the LAST hidden-state of the 6th example in the batch
"""
if self.bidirectional:
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim*2)
else:
lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
fc1_out = F.relu(self.fc1(lstm_out))
fc2_out = F.relu(self.fc2(fc1_out))
d_out = self.dropout(fc2_out) # d_out.shape = torch.Size([5000, 768])
out = self.fc3(d_out) # logits , out.shape = torch.Size([5000, 34])
out = out.view(x.size(0), self.seq_len, -1) # out.shape = torch.Size([100, 50, 34])
return out, hidden
def init_hidden(self, batch_size):
weight = next(self.parameters()).data
# next(self.parameters()).data.new() : grabbing the first parameter in the model and making a new tensor of the same type with specified dimensions.
n_layers = self.n_layers # you have to double this, if using biLSTM
#n_layers = 2
if self.bidirectional:
hidden = (weight.new(n_layers*2, batch_size, self.hidden_dim).zero_().to(device),
weight.new(n_layers*2, batch_size, self.hidden_dim).zero_().to(device))
else:
hidden = (weight.new(n_layers, batch_size, self.hidden_dim).zero_().to(device),
weight.new(n_layers, batch_size, self.hidden_dim).zero_().to(device))
return hidden
def init_weights(self, layer):
if type(layer) == nn.Linear:
print("Initiliaze layer with nn.init.xavier_uniform_: {}".format(layer))
torch.nn.init.xavier_uniform_(layer.weight)
layer.bias.data.fill_(0.01)
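# A minimal shape-check sketch for ``LstmNet`` above.  The sizes (batch of 2,
# seq_len 5, input_size 8, hidden_dim 16, 3 classes) are illustrative assumptions;
# the training code below uses seq_len=50 and input_size=3072.
def _demo_lstmnet_shapes():
    net = LstmNet(output_size=3, input_size=8, hidden_dim=16, n_layers=1, seq_len=5).to(device)
    x = torch.randn(2, 5, 8).to(device)   # (batch-size, seq-len, input-size)
    h = net.init_hidden(batch_size=2)     # (h_0, c_0), each of shape (n_layers, batch, hidden_dim)
    out, hidden = net(x, h)
    assert out.shape == (2, 5, 3)         # per-token logits over the 3 output classes
    return out, hidden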
def predict_lstm(model, criterion, data_loader, examples, batch_size):
model.eval()
loss = 0
predictions = None
#h = model.init_hidden(batch_size)
with torch.no_grad():
for batch_index, (data, target) in enumerate(data_loader):
#h = tuple([e.data[:, 0:data.size(0), :] for e in h])
h = model.init_hidden(data.size(0))
data, target = data.to(device), target.to(device)
output, h = model(data, h)
output_head = None
for i in range(output.shape[0]):
eg = examples[batch_index * batch_size + i]
token_index = eg.anchor.head().index_in_sentence
if output_head is None:
output_head = torch.unsqueeze(output[i][token_index], dim=0)
else:
output_head = torch.cat((output_head, torch.unsqueeze(output[i][token_index], dim=0)))
loss += criterion(output_head, target)
# print('type(output_head)=', type(output_head)) # torch.Tensor
# print('output_head.shape=', output_head.shape) # torch.Size([100, 34])
# print('type(output_head.data)=', type(output_head.data)) # torch.Tensor
# print('output_head.data.shape=', output_head.data.shape) # torch.Size([100, 34])
# pred = output_head.data.max(1, keepdim=True)[1] # get index of max
if predictions is None:
predictions = output_head.data.cpu().numpy()
else:
predictions = np.vstack((predictions, output_head.data.cpu().numpy()))
# correct += pred.eq(target.data.view_as(pred)).cpu().sum()
loss /= len(data_loader.dataset)
print(f'=====\nAverage loss: {loss:.4f}')
return predictions
def train_trigger_lstm(params, word_embeddings, trigger_extractor, bidirectional=False):
"""
:type params: dict
:type word_embeddings: dict[str:nlplingo.embeddings.word_embeddings.WordEmbedding]
:type trigger_extractor: nlplingo.nn.extractor.Extractor
"""
# prepare dataset for sample generation
logger.info("Preparing docs")
train_docs = prepare_docs(params['data']['train']['filelist'], word_embeddings, params)
dev_docs = prepare_docs(params['data']['dev']['filelist'], word_embeddings, params)
logger.info("Applying domain")
for doc in train_docs + dev_docs:
doc.apply_domain(trigger_extractor.domain)
batch_size = trigger_extractor.hyper_parameters.batch_size
input_size = 3072
(train_examples, train_data, train_data_list, train_label) = (generate_trigger_data_feature(trigger_extractor.example_generator, train_docs, trigger_extractor.feature_generator))
#train_size = int(len(train_examples) / batch_size) * batch_size
#train_examples = train_examples[0:train_size]
#for i in range(len(train_data_list)):
# train_data_list[i] = train_data_list[i][:, :, 0:input_size]
#train_data_list[i] = train_data_list[i][0:train_size]
#print(train_data_list[i].shape) # [N, seq_len, input_size/num#_features]
#train_label = train_label[0:train_size]
# train_data_list[0].shape= (4300, 200, 3072)
print('type(train_label)=', type(train_label)) # np.ndarray
print('train_label.shape=', train_label.shape) # (4200, 34)
print('train_label=', train_label)
train_labels = []
for i in range(len(train_label)):
train_labels.append([np.argmax(train_label[i]), train_examples[i].anchor.head().index_in_sentence])
#train_labels.append((train_label[i], train_examples[i].anchor.head().index_in_sentence))
train_labels = np.array(train_labels)
print('type(train_labels)=', type(train_labels)) # np.ndarray
print('train_labels.shape=', train_labels.shape) # (4200, 2)
print('train_labels=', train_labels)
#for i in range(100):
# token_index = int(train_labels[i][1])
# print('train_labels[', i, ']=', train_labels[i], 'embeddings=', train_data_list[0][i][token_index][0], train_data_list[0][i][token_index][-1], ' trigger=', train_examples[i].anchor.head().text)
#a = torch.from_numpy(np.array(train_label)).max(1)[1]
#print(a) # tensor([33, 33, 33, ..., 33, 33, 33])
#print(type(a)) # torch.Tensor
#print(a.shape) # torch.Size([4200])
(dev_examples, dev_data, dev_data_list, dev_label) = (generate_trigger_data_feature(trigger_extractor.example_generator, dev_docs, trigger_extractor.feature_generator))
#dev_size = int(len(dev_examples) / batch_size) * batch_size
#dev_examples = dev_examples[0:dev_size]
#for i in range(len(dev_data_list)):
# dev_data_list[i] = dev_data_list[i][:, :, 0:input_size]
#dev_data_list[i] = dev_data_list[i][0:dev_size]
#dev_label = dev_label[0:dev_size]
#train_data = TensorDataset(torch.from_numpy(np.array(train_data_list)).squeeze(), torch.from_numpy(np.array(train_label)).max(1)[1])
train_data = TensorDataset(torch.from_numpy(np.array(train_data_list)).squeeze(), torch.from_numpy(train_labels))
dev_data = TensorDataset(torch.from_numpy(np.array(dev_data_list)).squeeze(), torch.from_numpy(np.array(dev_label)).max(1)[1])
#input_size = len(np.array(train_data_list).squeeze()[0])
output_size = len(train_label[0])
#output_size = len(dev_label[0])
#print('input_size=%d output_size=%d' % (input_size, output_size))
train_loader = DataLoader(train_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=True)
dev_loader = DataLoader(dev_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=False)
# ========= training
model = LstmNet(output_size, input_size, 512, 1, 50, bidirectional=bidirectional)
model.to(device)
criterion = nn.CrossEntropyLoss() # this is logSoftmax + nll_loss
optimizer = torch.optim.Adam(model.parameters(), lr=0.00001)
model.train()
#h = None
for epoch in range(trigger_extractor.hyper_parameters.epoch):
#h = model.init_hidden(batch_size)
for batch_index, (data, target) in enumerate(train_loader):
#print(data.size()) # [100, 50, 3072], i.e. (batch-size, seq-len, feature-input-size)
trigger_token_indices = target.data[:, 1].numpy()
labels = target.data[:, 0]
# Creating new variables for the hidden state, otherwise we'd backprop through the entire training history
#h = tuple([e.data[:, 0:data.size(0), :] for e in h])
h = model.init_hidden(data.size(0))
# h[0].shape = torch.Size([1, 100, 768]) (#layers, batch-size, hidden-dimension)
data, labels = data.to(device), labels.to(device)
optimizer.zero_grad()
output, h = model(data, h)
output_head = None
for i in range(output.shape[0]): # for each example in batch
token_index = trigger_token_indices[i]
if output_head is None:
output_head = torch.unsqueeze(output[i][token_index], dim=0)
else:
output_head = torch.cat((output_head, torch.unsqueeze(output[i][token_index], dim=0)))
# output_head.shape = torch.Size([100, 34]), i.e. (batch-size, #classes)
loss = criterion(output_head, labels)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 5) # TODO this should be a parameter
optimizer.step()
if (batch_index % 10) == 0:
N = len(train_loader.dataset)
N_seen = batch_index * len(data)
batch_proportion = 100. * batch_index / len(train_loader)
print('Train epoch {}/{} | Batch status: {}/{} ({:.0f}%) | Loss: {:.6f}'.format(epoch, trigger_extractor.hyper_parameters.epoch, N_seen, N, batch_proportion, loss.item()))
# saving model file
torch.save(model.state_dict(), trigger_extractor.model_file)
predictions = predict_lstm(model, criterion, dev_loader, dev_examples, batch_size)
# model.eval()
# loss = 0
# predictions = None
# h = model.init_hidden(batch_size)
# with torch.no_grad():
# for batch_index, (data, target) in enumerate(dev_loader):
# h = tuple([e.data[:, 0:data.size(0), :] for e in h])
# data, target = data.to(device), target.to(device)
# output, h = model(data, h)
#
# output_head = None
# for i in range(output.shape[0]):
# eg = dev_examples[batch_index * batch_size + i]
# token_index = eg.anchor.head().index_in_sentence
#
# if output_head is None:
# output_head = torch.unsqueeze(output[i][token_index], dim=0)
# else:
# output_head = torch.cat((output_head, torch.unsqueeze(output[i][token_index], dim=0)))
#
# loss += criterion(output_head, target)
#
# #print('type(output_head)=', type(output_head)) # torch.Tensor
# #print('output_head.shape=', output_head.shape) # torch.Size([100, 34])
# #print('type(output_head.data)=', type(output_head.data)) # torch.Tensor
# #print('output_head.data.shape=', output_head.data.shape) # torch.Size([100, 34])
# #pred = output_head.data.max(1, keepdim=True)[1] # get index of max
#
# if predictions is None:
# predictions = output_head.data.cpu().numpy()
# else:
# predictions = np.vstack((predictions, output_head.data.cpu().numpy()))
#
# #correct += pred.eq(target.data.view_as(pred)).cpu().sum()
#
# loss /= len(dev_loader.dataset)
# print(f'=====\nAverage loss: {loss:.4f}')
    score, score_breakdown = evaluate_f1(predictions, dev_label, trigger_extractor.domain.get_event_type_index('None'))
    logger.info(score.to_string())
    print_score_breakdown(trigger_extractor, score_breakdown)
    write_score_to_file(trigger_extractor, score, score_breakdown, params['train.score_file'])
#predicted_positive_dev_triggers = evaluate(model, dev_loader, criterion, dev_examples, dev_label, trigger_extractor, params['train.score_file'])
def test_trigger_lstm(params, word_embeddings, trigger_extractor, bidirectional=False):
"""
:type params: dict
:type word_embeddings: nlplingo.embeddings.WordEmbedding
:type trigger_extractor: nlplingo.nn.extractor.Extractor
"""
test_docs = prepare_docs(params['data']['test']['filelist'], word_embeddings, params)
for doc in test_docs:
doc.apply_domain(trigger_extractor.domain)
feature_generator = trigger_extractor.feature_generator
""":type: nlplingo.event.trigger.feature.EventTriggerFeatureGenerator"""
example_generator = trigger_extractor.example_generator
""":type: nlplingo.event.trigger.generator.EventTriggerExampleGenerator"""
trigger_model = trigger_extractor.extraction_model
""":type: nlplingo.nn.trigger_model.TriggerModel"""
# Generate data
(test_examples, test_data, test_data_list, test_label) = (generate_trigger_data_feature(example_generator, test_docs, feature_generator))
test_data = TensorDataset(torch.from_numpy(np.array(test_data_list)).squeeze(), torch.from_numpy(np.array(test_label)).max(1)[1])
test_loader = DataLoader(test_data, batch_size=trigger_extractor.hyper_parameters.batch_size, shuffle=False)
input_size = 3072
#print(np.array(test_data_list).squeeze().shape) # (4513, 50, 3072) , (#examples, seq-len, input-size)
output_size = len(test_label[0]) # number of output labels/classes, e.g. 33
print('input_size=%d output_size=%d' % (input_size, output_size))
model = LstmNet(output_size, input_size, 512, 1, 50, bidirectional=bidirectional)
model.to(device)
model.load_state_dict(torch.load(trigger_extractor.model_file))
    criterion = nn.CrossEntropyLoss()  # applies log-softmax to the logits, then negative log-likelihood of the target class
batch_size = trigger_extractor.hyper_parameters.batch_size
predictions = predict_lstm(model, criterion, test_loader, test_examples, batch_size)
    score, score_breakdown = evaluate_f1(predictions, test_label, trigger_extractor.domain.get_event_type_index('None'))
    logger.info(score.to_string())
    print_score_breakdown(trigger_extractor, score_breakdown)
    write_score_to_file(trigger_extractor, score, score_breakdown, params['test.score_file'])
def evaluate(model, data_loader, criterion, examples, labels, extractor, score_file):
model.eval()
loss = 0
correct = 0
predictions = None
for data, target in data_loader:
data, target = data.to(device), target.to(device)
output = model(data)
loss += criterion(output, target).item()
# keepdim=True makes output tensors of the same size as `input`,
# except in the dimension `dim` where they are of size 1
# In this case, output.data is a matrix of predicted probabilities (#examples, #labels)
pred_probs = output.data.max(1, keepdim=True)[0] # get max probabilities
pred = output.data.max(1, keepdim=True)[1] # get max indices
#print('type(output.data)', type(output.data))
#print('output.data.shape=', output.data.shape)
if predictions is None:
predictions = output.data.cpu().numpy()
else:
predictions = np.vstack((predictions, output.data.cpu().numpy()))
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
loss /= len(data_loader.dataset)
print(f'=====\nAverage loss: {loss:.4f}')
score, score_breakdown = evaluate_f1(predictions, labels, extractor.domain.get_event_type_index('None'))
logger.info(score.to_string())
print_score_breakdown(extractor, score_breakdown)
write_score_to_file(extractor, score, score_breakdown, score_file)
return get_predicted_positive_triggers(predictions, examples, extractor)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', required=True) # train_trigger, train_arg, test_trigger, test_arg
parser.add_argument('--params', required=True)
args = parser.parse_args()
with open(args.params) as f:
params = json.load(f)
print(json.dumps(params, sort_keys=True, indent=4))
# ==== loading of embeddings ====
embeddings = load_embeddings(params)
load_extractor_models_from_file = False
#if args.mode in {'test_trigger', 'test_argument', 'decode_trigger_argument', 'decode_trigger'}:
# load_extractor_models_from_file = True
trigger_extractors = []
argument_extractors = []
""":type: list[nlplingo.model.extractor.Extractor]"""
for extractor_params in params['extractors']:
extractor = Extractor(params, extractor_params, embeddings, load_extractor_models_from_file)
if extractor.model_type.startswith('event-trigger_'):
trigger_extractors.append(extractor)
elif extractor.model_type.startswith('event-argument_'):
argument_extractors.append(extractor)
else:
raise RuntimeError('Extractor model type: {} not implemented.'.format(extractor.model_type))
#if 'domain_ontology.scoring' in params:
# scoring_domain = EventDomain.read_domain_ontology_file(params.get_string('domain_ontology.scoring'), 'scoring')
#else:
# scoring_domain = None
if args.mode == 'train_trigger_multilayer':
train_trigger_multilayer(params, embeddings, trigger_extractors[0])
elif args.mode == 'test_trigger_multilayer':
test_trigger_multilayer(params, embeddings, trigger_extractors[0])
elif args.mode == 'train_trigger_lstm':
train_trigger_lstm(params, embeddings, trigger_extractors[0], bidirectional=False)
elif args.mode == 'test_trigger_lstm':
test_trigger_lstm(params, embeddings, trigger_extractors[0], bidirectional=False)
elif args.mode == 'train_trigger_bilstm':
train_trigger_lstm(params, embeddings, trigger_extractors[0], bidirectional=True)
elif args.mode == 'test_trigger_bilstm':
test_trigger_lstm(params, embeddings, trigger_extractors[0], bidirectional=True)
else:
raise RuntimeError('mode: {} is not implemented!'.format(args.mode))
| 46.424107 | 202 | 0.664134 |
760009cc6b0861e0cb394a4e0bf4632448cf7105 | 863 | py | Python | Lang/Python/py_base/data_structure/graph/dict2list.py | Orig5826/Basics | 582e74c83a2b654640fe7c47a1a385a8913cc466 | ["MIT"] | 5 | 2018-03-09T13:51:11.000Z | 2021-12-17T02:05:59.000Z | Lang/Python/py_base/data_structure/graph/dict2list.py | Orig5826/Basics | 582e74c83a2b654640fe7c47a1a385a8913cc466 | ["MIT"] | null | null | null | Lang/Python/py_base/data_structure/graph/dict2list.py | Orig5826/Basics | 582e74c83a2b654640fe7c47a1a385a8913cc466 | ["MIT"] | null | null | null |
g0 = { 'A': ['B', 'C', 'F'],
'B': ['C', 'D'],
'C': ['D'],
'D': ['C'],
#'E': ['F', 'D'],
'E': ['D', 'F'],
'F': ['C']
}
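# g0 is the adjacency-list (dict) form of a directed graph; tag and g below give the
# equivalent adjacency-matrix form, and the two functions convert between them.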
def dict2list(dd):
ll = []
keys = dd.keys()
for key0 in keys:
ll_temp = []
for key1 in keys:
if key1 in dd[key0]:
ll_temp.append(1)
else:
ll_temp.append(0)
ll.append(ll_temp)
# print(ll)
return ll
tag = ['A','B','C','D','E','F']
g = [ [0,1,1,0,0,1],
[0,0,1,1,0,0],
[0,0,0,1,0,0],
[0,0,1,0,0,0],
[0,0,0,1,0,1],
[0,0,1,0,0,0],
]
def list2dict(ll):
dd = {}
for i in range(len(ll)):
ll_temp = []
for j in range(len(ll[0])):
if ll[i][j] == 1:
ll_temp.append(tag[j])
dd[tag[i]] = ll_temp
return dd
if __name__ == '__main__':
result = dict2list(g0)
if result == g:
        print('correct')
    else:
        print('wrong')
    result = list2dict(g)
    if result == g0:
        print('correct')
    else:
        print('wrong')
| 14.87931 | 31 | 0.473928 |
2bd6e2ff6d9cb75d3a8006d99e5de9fff54a99c5 | 1,238 | py | Python | saleor/plugins/invoicing/plugin.py | ibutiti/saleor | fffe9a54c01aa07131102474dcb1519e0b59da74 | ["BSD-3-Clause"] | 1 | 2021-05-18T17:20:08.000Z | 2021-05-18T17:20:08.000Z | saleor/plugins/invoicing/plugin.py | Niranjoyyengkhom/saleor | 4e6c4fe10476508e6b5c06fb1f38f9dc1dfc81a5 | ["CC-BY-4.0"] | null | null | null | saleor/plugins/invoicing/plugin.py | Niranjoyyengkhom/saleor | 4e6c4fe10476508e6b5c06fb1f38f9dc1dfc81a5 | ["CC-BY-4.0"] | null | null | null |
from typing import Any, Optional
from uuid import uuid4
from django.core.files.base import ContentFile
from ...core import JobStatus
from ...invoice.models import Invoice
from ...order.models import Order
from ..base_plugin import BasePlugin
from .utils import generate_invoice_number, generate_invoice_pdf
class InvoicingPlugin(BasePlugin):
PLUGIN_ID = "mirumee.invoicing"
PLUGIN_NAME = "Invoicing"
DEFAULT_ACTIVE = True
PLUGIN_DESCRIPTION = "Built-in saleor plugin that handles invoice creation."
CONFIGURATION_PER_CHANNEL = False
def invoice_request(
self,
order: "Order",
invoice: "Invoice",
number: Optional[str],
previous_value: Any,
) -> Any:
invoice.update_invoice(number=generate_invoice_number())
file_content, creation_date = generate_invoice_pdf(invoice)
invoice.created = creation_date
invoice.invoice_file.save(
f"invoice-{invoice.number}-order-{order.id}-{uuid4()}.pdf",
ContentFile(file_content),
)
invoice.status = JobStatus.SUCCESS
invoice.save(
update_fields=["created", "number", "invoice_file", "status", "updated_at"]
)
return invoice
| 31.74359 | 87 | 0.68336 |
ec34d7bec9a91823274ea62013efc4f64805b4a5 | 141 | py | Python | vmraid/desk/form/__init__.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | ["MIT"] | null | null | null | vmraid/desk/form/__init__.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | ["MIT"] | null | null | null | vmraid/desk/form/__init__.py | sowrisurya/vmraid | f833e00978019dad87af80b41279c0146c063ed5 | ["MIT"] | null | null | null |
# Copyright (c) 2015, VMRaid Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
| 28.2 | 68 | 0.794326 |
2db96d0ed5008101a4f2757537d199eaeab421eb | 2,805 | py | Python | pyNastran/bdf/mesh_utils/dev/create_vectorized_numbered.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | ["BSD-3-Clause"] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | pyNastran/bdf/mesh_utils/dev/create_vectorized_numbered.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | ["BSD-3-Clause"] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | pyNastran/bdf/mesh_utils/dev/create_vectorized_numbered.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | ["BSD-3-Clause"] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z |
"""
defines:
- FakeBDFVectorized
"""
from pyNastran.bdf.bdf import BDF
class FakeBDFVectorized(BDF):
"""
Renumbers the element ids to be in the same order as the BDFVectorized
Intended for GUI testing, not anything serious.
"""
#def __init__(self, debug=True, log=None, mode='msc'):
#"""see ``BDF.read_bdf``"""
#BDF.__init__(self, debug=debug, log=log, mode=mode)
def read_bdf(self, bdf_filename=None, validate=True, xref=True,
punch=False, read_includes=True,
encoding=None):
"""see ``BDF.read_bdf``"""
BDF.read_bdf(self, bdf_filename=bdf_filename, validate=validate, xref=False,
punch=punch, read_includes=read_includes,
encoding=encoding)
# not done
etypes = [
'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5', 'CVISC',
'CBUSH', 'CBUSH1D', 'CBUSH2D',
'CONROD', 'CROD', 'CTUBE', 'PLOTEL',
'CBAR', 'CBEAM', 'CBEND',
'CSHEAR',
'CTRIA3', 'CQUAD4', 'CTRIA6', 'CQUAD8', 'CTRIAR', 'CQUADR',
'CTETRA', 'CPENTA', 'CHEXA', 'CPYRAM',
'CHBDYG', 'CHBDYE', 'CHBDYP',
]
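        # Walk the element types in this fixed order and hand out new sequential
        # element ids starting at 1, so ids follow the BDFVectorized type ordering
        # instead of the original deck numbering; PLOTELs are folded into the same
        # renumbered elements2 dict.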
eid0 = 1
#eid_map = {}
elements2 = {}
for etype in etypes:
if etype in self._type_to_id_map:
eids = self._type_to_id_map[etype]
print(etype, eids)
for eid in eids:
#eid_map[eid0] = eid
if etype == 'PLOTEL':
element = self.plotels[eid]
else:
element = self.elements[eid]
element.eid = eid0
print(element)
elements2[eid0] = element
#self.elements[eid] =
eid0 += 1
failed_types = set()
for elem in self.elements.values():
if elem.type not in etypes:
failed_types.add(elem.type)
self.elements = elements2
self.plotels = {}
if failed_types:
msg = 'The following types were not converted and may result in a bad deck\n'
msg += ' %s' % failed_types
self.log.warning(msg)
# loads are not currently supported
self.loads = {} # type: Dict[int, List[Any]]
self.load_combinations = {} # type: Dict[int, List[Any]]
def create_vectorized_numbered(bdf_filename_in, bdf_filename_out, debug=True):
model = FakeBDFVectorized()
model.read_bdf(bdf_filename=bdf_filename_in, validate=True, xref=False, punch=False,
read_includes=True, encoding=None)
model.write_bdf(bdf_filename_out)
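# Illustrative call (comment only; the file names are placeholders, not paths from
# the original repository):
#     create_vectorized_numbered('model.bdf', 'model.renumbered.bdf')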
| 35.506329 | 89 | 0.534046 |
7167a981a7ab8259091e3ed40b2174968dc1aa95 | 8,872 | py | Python | deepxml/libs/collate_fn.py | ethen8181/deepxml | 81dedfbe8d67170569a9a1bff989a74b4e7da859 | ["MIT"] | 41 | 2021-03-11T22:15:53.000Z | 2022-03-29T00:53:09.000Z | deepxml/libs/collate_fn.py | ethen8181/deepxml | 81dedfbe8d67170569a9a1bff989a74b4e7da859 | ["MIT"] | 9 | 2021-06-16T02:05:56.000Z | 2022-01-18T08:41:25.000Z | deepxml/libs/collate_fn.py | ethen8181/deepxml | 81dedfbe8d67170569a9a1bff989a74b4e7da859 | ["MIT"] | 9 | 2021-06-11T09:34:21.000Z | 2022-01-23T01:50:44.000Z |
import torch
import numpy as np
from torch.nn.utils.rnn import pad_sequence
def pad_and_collate(x, pad_val=0, dtype=torch.FloatTensor):
"""
A generalized function for padding batch using utils.rnn.pad_sequence
* pad as per the maximum length in the batch
* returns a collated tensor
Arguments:
---------
x: iterator
iterator over np.ndarray that needs to be converted to
tensors and padded
pad_val: float
pad tensor with this value
will cast the value as per the data type
dtype: datatype, optional (default=torch.FloatTensor)
tensor should be of this type
"""
return pad_sequence([torch.from_numpy(z) for z in x],
batch_first=True, padding_value=pad_val).type(dtype)
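# A minimal sketch of ``pad_and_collate`` on two ragged index arrays; the values
# are illustrative only.
def _demo_pad_and_collate():
    ragged = [np.array([1, 2, 3]), np.array([4, 5])]
    padded = pad_and_collate(ragged, pad_val=0, dtype=torch.LongTensor)
    # padded == tensor([[1, 2, 3],
    #                   [4, 5, 0]])
    return padded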
def collate_dense(x, dtype=torch.FloatTensor):
"""
Collate dense documents/labels and returns
Arguments:
---------
x: iterator
iterator over np.ndarray that needs to be converted to
tensors and padded
dtype: datatype, optional (default=torch.FloatTensor)
features should be of this type
"""
return torch.stack([torch.from_numpy(z) for z in x], 0).type(dtype)
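# A minimal sketch of ``collate_dense`` stacking three equally sized dense rows into
# a (3, 4) float batch; the values are illustrative only.
def _demo_collate_dense():
    rows = [np.ones(4), np.zeros(4), np.ones(4)]
    return collate_dense(rows)    # tensor of shape (3, 4), dtype torch.float32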
def collate_sparse(x, pad_val=0.0, has_weight=False, dtype=torch.FloatTensor):
"""
Collate sparse documents
* Can handle with or without weights
* Expects an iterator over tuples if has_weight=True
Arguments:
---------
x: iterator
iterator over data points which can be
np.array or tuple of np.ndarray depending on has_weight
pad_val: list or float, optional, default=(0.0)
padding value for indices and weights
* expects a list when has_weight=True
has_weight: bool, optional, default=False
If entries have weights
* True: objects are tuples of np.ndarrays
0: indices, 1: weights
* False: objects are np.ndarrays
dtypes: list or dtype, optional (default=torch.FloatTensor)
dtypes of indices and values
* expects a list when has_weight=True
"""
weights = None
if has_weight:
x = list(x)
indices = pad_and_collate(map(lambda z: z[0], x), pad_val[0], dtype[0])
weights = pad_and_collate(map(lambda z: z[1], x), pad_val[1], dtype[1])
else:
indices = pad_and_collate(x, pad_val, dtype)
return indices, weights
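# A minimal sketch of ``collate_sparse`` with per-index weights, mirroring how the
# shortlist collate functions below call it (pad_val and dtype passed as pairs).
# The index/weight values are illustrative only.
def _demo_collate_sparse():
    batch = [
        (np.array([3, 7]), np.array([0.5, 1.0])),    # (indices, weights) for example 0
        (np.array([2]), np.array([0.25])),           # (indices, weights) for example 1
    ]
    indices, weights = collate_sparse(
        batch, pad_val=[0, 0.0], has_weight=True,
        dtype=[torch.LongTensor, torch.FloatTensor])
    # indices == [[3, 7], [2, 0]]; weights == [[0.5, 1.0], [0.25, 0.0]]
    return indices, weights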
def get_iterator(x, ind=None):
if ind is None:
return map(lambda z: z, x)
else:
return map(lambda z: z[ind], x)
def construct_collate_fn(feature_type, classifier_type, num_partitions=1):
def _collate_fn_dense_full(batch):
return collate_fn_dense_full(batch, num_partitions)
def _collate_fn_dense(batch):
return collate_fn_dense(batch)
def _collate_fn_sparse(batch):
return collate_fn_sparse(batch)
def _collate_fn_dense_sl(batch):
return collate_fn_dense_sl(batch, num_partitions)
def _collate_fn_sparse_full(batch):
return collate_fn_sparse_full(batch, num_partitions)
def _collate_fn_sparse_sl(batch):
return collate_fn_sparse_sl(batch, num_partitions)
if feature_type == 'dense':
if classifier_type == 'None':
return _collate_fn_dense
elif classifier_type == 'shortlist':
return _collate_fn_dense_sl
else:
return _collate_fn_dense_full
else:
if classifier_type == 'None':
return _collate_fn_sparse
elif classifier_type == 'shortlist':
return _collate_fn_sparse_sl
else:
return _collate_fn_sparse_full
def collate_fn_sparse_sl(batch, num_partitions):
"""
Combine each sample in a batch with shortlist
For sparse features
"""
_is_partitioned = True if num_partitions > 1 else False
batch_data = {'batch_size': len(batch), 'X_ind': None}
batch_data['batch_size'] = len(batch)
batch_data['X_ind'], batch_data['X'] = collate_sparse(
get_iterator(batch, 0), pad_val=[0, 0.0], has_weight=True,
dtype=[torch.LongTensor, torch.FloatTensor])
z = list(get_iterator(batch, 1))
if _is_partitioned:
batch_data['Y_s'] = [collate_dense(
get_iterator(get_iterator(z, 0), idx), dtype=torch.LongTensor)
for idx in range(num_partitions)]
batch_data['Y'] = [collate_dense(
get_iterator(get_iterator(z, 1), idx), dtype=torch.FloatTensor)
for idx in range(num_partitions)]
batch_data['Y_sim'] = [collate_dense(
get_iterator(get_iterator(z, 2), idx), dtype=torch.FloatTensor)
for idx in range(num_partitions)]
batch_data['Y_mask'] = [collate_dense(
get_iterator(get_iterator(z, 3), idx), dtype=torch.BoolTensor)
for idx in range(num_partitions)]
batch_data['Y_map'] = collate_dense(
get_iterator(z, 4), dtype=torch.LongTensor)
else:
batch_data['Y_s'] = collate_dense(
get_iterator(z, 0), dtype=torch.LongTensor)
batch_data['Y'] = collate_dense(
get_iterator(z, 1), dtype=torch.FloatTensor)
batch_data['Y_sim'] = collate_dense(
get_iterator(z, 2), dtype=torch.FloatTensor)
batch_data['Y_mask'] = collate_dense(
get_iterator(z, 3), dtype=torch.BoolTensor)
return batch_data
def collate_fn_dense_sl(batch, num_partitions):
"""
Combine each sample in a batch with shortlist
For dense features
"""
_is_partitioned = True if num_partitions > 1 else False
batch_data = {'batch_size': len(batch), 'X_ind': None}
batch_data['X'] = collate_dense(get_iterator(batch, 0))
z = list(get_iterator(batch, 1))
if _is_partitioned:
batch_data['Y_s'] = [collate_dense(
get_iterator(get_iterator(z, 0), idx), dtype=torch.LongTensor)
for idx in range(num_partitions)]
batch_data['Y'] = [collate_dense(
get_iterator(get_iterator(z, 1), idx), dtype=torch.FloatTensor)
for idx in range(num_partitions)]
batch_data['Y_sim'] = [collate_dense(
get_iterator(get_iterator(z, 2), idx), dtype=torch.FloatTensor)
for idx in range(num_partitions)]
batch_data['Y_mask'] = [collate_dense(
get_iterator(get_iterator(z, 3), idx), dtype=torch.BoolTensor)
for idx in range(num_partitions)]
batch_data['Y_map'] = collate_dense(
get_iterator(z, 4), dtype=torch.LongTensor)
else:
batch_data['Y_s'] = collate_dense(
get_iterator(z, 0), dtype=torch.LongTensor)
batch_data['Y'] = collate_dense(
get_iterator(z, 1), dtype=torch.FloatTensor)
batch_data['Y_sim'] = collate_dense(
get_iterator(z, 2), dtype=torch.FloatTensor)
batch_data['Y_mask'] = collate_dense(
get_iterator(z, 3), dtype=torch.BoolTensor)
return batch_data
def collate_fn_dense_full(batch, num_partitions):
"""
Combine each sample in a batch
For dense features
"""
_is_partitioned = True if num_partitions > 1 else False
batch_data = {'batch_size': len(batch), 'X_ind': None}
batch_data['X'] = collate_dense(get_iterator(batch, 0))
if _is_partitioned:
batch_data['Y'] = [collate_dense(
get_iterator(get_iterator(batch, 1), idx))
            for idx in range(num_partitions)]
else:
batch_data['Y'] = collate_dense(get_iterator(batch, 1))
return batch_data
def collate_fn_sparse_full(batch, num_partitions):
"""
Combine each sample in a batch
For sparse features
"""
_is_partitioned = True if num_partitions > 1 else False
batch_data = {'batch_size': len(batch), 'X_ind': None}
batch_data['X_ind'], batch_data['X'] = collate_sparse(
get_iterator(batch, 0), pad_val=[0, 0.0], has_weight=True,
dtype=[torch.LongTensor, torch.FloatTensor])
if _is_partitioned:
batch_data['Y'] = [collate_dense(
get_iterator(get_iterator(batch, 1), idx))
            for idx in range(num_partitions)]
else:
batch_data['Y'] = collate_dense(get_iterator(batch, 1))
return batch_data
def collate_fn_sparse(batch):
"""
Combine each sample in a batch
For sparse features
"""
batch_data = {'batch_size': len(batch), 'X_ind': None}
batch_data['X_ind'], batch_data['X'] = collate_sparse(
get_iterator(batch), pad_val=[0, 0.0], has_weight=True,
dtype=[torch.LongTensor, torch.FloatTensor])
return batch_data
def collate_fn_dense(batch):
"""
Combine each sample in a batch
    For dense features
"""
batch_data = {'batch_size': len(batch), 'X_ind': None}
batch_data['X'] = collate_dense(get_iterator(batch))
return batch_data
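# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module): how construct_collate_fn is typically plugged into a
# torch.utils.data.DataLoader.  The toy dataset below is an assumption made
# purely to exercise the dense/full code path.
def _collate_usage_example():
    import numpy as np
    from torch.utils.data import DataLoader, Dataset
    class _ToyDenseDataset(Dataset):
        # each item is a (dense feature vector, dense label vector) pair
        def __init__(self, n=8, n_feat=5, n_lbl=3):
            self.X = [np.random.rand(n_feat) for _ in range(n)]
            self.Y = [np.random.rand(n_lbl) for _ in range(n)]
        def __len__(self):
            return len(self.X)
        def __getitem__(self, idx):
            return self.X[idx], self.Y[idx]
    collate_fn = construct_collate_fn(
        feature_type='dense', classifier_type='full', num_partitions=1)
    loader = DataLoader(_ToyDenseDataset(), batch_size=4, collate_fn=collate_fn)
    batch = next(iter(loader))
    # batch['X']: FloatTensor of shape [4, 5]; batch['Y']: FloatTensor [4, 3]
    return batch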
| 35.488
| 79
| 0.645401
|
169108c1272c740c0b15bbd8b8e7d3608734d100
| 957
|
py
|
Python
|
lexicon/tests/providers/test_safedns.py
|
tlusser-inv/lexicon
|
700d9912fb4628414dae1f7b9783837eb8d796e0
|
[
"MIT"
] | null | null | null |
lexicon/tests/providers/test_safedns.py
|
tlusser-inv/lexicon
|
700d9912fb4628414dae1f7b9783837eb8d796e0
|
[
"MIT"
] | null | null | null |
lexicon/tests/providers/test_safedns.py
|
tlusser-inv/lexicon
|
700d9912fb4628414dae1f7b9783837eb8d796e0
|
[
"MIT"
] | null | null | null |
"""Test for one implementation of the interface"""
from unittest import TestCase
import pytest
from lexicon.tests.providers.integration_tests import IntegrationTests
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class SafednsProviderTests(TestCase, IntegrationTests):
"""Integration tests for SafeDNS provider"""
provider_name = 'safedns'
domain = 'lexicon.tests'
def _filter_headers(self):
return ['Authorization']
@pytest.mark.skip(reason="Record-level TTLs are not supported by this provider")
def test_provider_when_calling_list_records_after_setting_ttl(self):
return
@pytest.mark.skip(reason="CNAME requires FQDN for this provider")
def test_provider_when_calling_create_record_for_CNAME_with_valid_name_and_content(self):
return
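# Hedged sketch (added for illustration; not part of lexicon's real suite):
# the reuse pattern above in miniature -- a plain mixin holds shared test
# bodies, and a concrete unittest.TestCase subclass only supplies
# provider-specific attributes.  All names below are illustrative assumptions.
class _SharedProviderChecks(object):
    """Analogue of IntegrationTests: checks every provider must pass."""
    def test_provider_name_is_a_string(self):
        assert isinstance(self.provider_name, str)
class _ExampleProviderTests(TestCase, _SharedProviderChecks):
    """Toy provider test case that inherits the shared checks."""
    provider_name = 'example'
    domain = 'example.test'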
| 39.875
| 93
| 0.783699
|
40cd6691bf827aa3b0ce4a567a2fdb01d328b40f
| 14,844
|
py
|
Python
|
codes/dgmpm_stability/2DEuler_cfl.py
|
adRenaud/research
|
2f0062a1800d7a17577bbfc2393b084253d567f4
|
[
"MIT"
] | 1
|
2021-06-18T14:52:03.000Z
|
2021-06-18T14:52:03.000Z
|
codes/dgmpm_stability/2DEuler_cfl.py
|
adRenaud/research
|
2f0062a1800d7a17577bbfc2393b084253d567f4
|
[
"MIT"
] | 1
|
2019-01-07T13:11:11.000Z
|
2019-01-07T13:11:11.000Z
|
codes/dgmpm_stability/2DEuler_cfl.py
|
adRenaud/research
|
2f0062a1800d7a17577bbfc2393b084253d567f4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import numpy as np
from scipy import optimize
from sympy import *
import matplotlib.pyplot as plt
import pdb
# def evalResidual(point,S,Sp,CFL):
# Res=0.
# if S.shape[0]==1:
# S1=[S[0,0]]
# S2=[S[0,1]]
# Sum1=np.sum(S1) ; Sum2=np.sum(S2)
# Nmp=1
# else:
# S1=np.asarray(S[0,:])[0]
# S2=np.asarray(S[1,:])[0]
# Sum1=np.sum(S1) ; Sum2=np.sum(S2)
# Nmp=len(S1)
# if Sp.shape[0]==1:
# Sp1=[Sp[0,0]]
# Sp2=[Sp[0,0]]
# Sump1=np.sum(Sp1) ; Sump2=np.sum(Sp2)
# Nmpp=1
# else:
# Sp1=np.asarray(Sp[0,:])[0]
# Sp2=np.asarray(Sp[1,:])[0]
# Sump1=np.sum(Sp1) ; Sump2=np.sum(Sp2)
# Nmpp=len(Sp1)
# # Sum over material points in curent cell
# for p in range(Nmp):
# ## First order contributions
# D_mu = S1[point]*S1[p]/Sum1 + S2[point]*S2[p]/Sum2 + CFL*( S2[point]/Sum2 - S1[point]/Sum1 -Nmp*S2[point]*S2[p]/(Sum2**2) )
# ## Second order contributions
# D_mu += 0.5*Nmp*(CFL**2)*((S2[p]/Sum2)*(S1[point]/Sum1-S2[point]/Sum2) + (S2[point]/(Sum2**2))*(Nmp*S2[p]/Sum2-1.) )
# Res = Res +np.abs(D_mu)
# # Sum over material points in previous cell
# for p in range(Nmpp):
# ## First order contributions
# D_mu = CFL*Nmp*Sp2[p]*S1[point]/(Sum1*Sump2)
# ## Second order contributions
# D_mu +=0.5*Nmp*(CFL**2)*( S1[point]/(Sum1*Sump2)*(1.-Nmpp*Sp2[p]/Sump2) -(Sp2[p]/Sump2)*(S1[point]/Sum1-S2[point]/Sum2) )
# Res=Res + np.abs(D_mu)
# return Res-1.
# Symbolic function to evaluate shape functions
shape_functions=lambda x,y: np.array([(1.-x)*(1.-y)/4.,(1.+x)*(1.-y)/4.,(1.+x)*(1.+y)/4.,(1.-x)*(1.+y)/4.])
grad_xi=lambda y:np.array([-(1.-y)/4.,(1.-y)/4.,(1.+y)/4.,-(1.+y)/4.])
grad_eta=lambda x:np.array([-(1.-x)/4.,-(1.+x)/4.,(1.+x)/4.,(1.-x)/4.])
# shapes=| N1(Xp1) N1(Xp2) ... N1(XNp) |
# | N2(Xp1) N2(Xp2) ... N2(XNp) |
# | N3(Xp1) N3(Xp2) ... N3(XNp) |
# | N4(Xp1) N4(Xp2) ... N4(XNp) |
# grad_z=| N1_z(Xp1) N1_z(Xp2) ... N1_z(XNp) |
# | N2_z(Xp1) N2_z(Xp2) ... N2_z(XNp) |
# | N3_z(Xp1) N3_z(Xp2) ... N3_z(XNp) |
# | N4_z(Xp1) N4_z(Xp2) ... N4_z(XNp) |
# where Ni(Xj) is the shape function of node i evaluated at the jth particles position
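# Hedged sanity check (added for illustration; not in the original script):
# at any local coordinate the four bilinear shape functions form a partition
# of unity and their local gradients sum to zero.
_xi_check, _eta_check = np.array([0.3]), np.array([-0.7])
assert np.allclose(np.sum(shape_functions(_xi_check, _eta_check), axis=0), 1.0)
assert np.allclose(np.sum(grad_xi(_eta_check)), 0.0)
assert np.allclose(np.sum(grad_eta(_xi_check)), 0.0)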
def symbolResidual(point,cx,cy,XC,XB,XL,XBL=0):
transverse=True
if XBL==0: transverse=False
shapesC=shape_functions(XC[0],XC[1])
dSxi_C=grad_xi(XC[1])
dSeta_C=grad_eta(XC[0])
shapesB=shape_functions(XB[0],XB[1])
dSxi_B=grad_xi(XB[1])
dSeta_B=grad_eta(XB[0])
shapesL=shape_functions(XL[0],XL[1])
dSxi_L=grad_xi(XL[1])
dSeta_L=grad_eta(XL[0])
## Number of material points in cells
NmpC=len(XC[0])
NmpL=len(XL[0])
NmpB=len(XB[0])
if XBL!=0:
shapesBL=shape_functions(XL[0],XL[1])
dSxi_BL=grad_xi(XBL[1])
dSeta_BL=grad_eta(XBL[0])
NmpBL=len(XBL[0])
else:
NmpBL=0
dt = symbols('dt')
## sum_i^K = np.sum(shapesK[i,:]) with cell K and node i
## shape functions evaluated at edges centers to weight fluxes contributions
## o -- 3 -- o
## | |
## 4 2
## | |
## o -- 1 -- o
shapeOnEdge=shape_functions(np.array([0.,1.,0.,-1.]),np.array([-1.,0.,1.,0.]))
## Define the normal to edges
Nx=np.array([0.,1.,0.,-1.])
Ny=np.array([-1.,0.,1.,0.])
Nnodes=4
Nedges=4
Res=0.
for P in range(NmpC):
## Contributions of material points sharing the same cell
D_PI=0.
for i in range(Nnodes):
# 0th-order contributions
wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
D_PI+=wheightings*shapesC[i,P]
# 1st-order contributions
for j in range(Nnodes):
D_PI+=dt*wheightings*(shapesC[j,P]/np.sum(shapesC[j,:]))*(cx*np.dot(dSxi_C[i,:],shapesC[j,:]) + cy*np.dot(dSeta_C[i,:],shapesC[j,:]))
# Contributions of edges 2 and 3
D_PI-=0.25*dt*wheightings*shapeOnEdge[i,1]*NmpC*cx*(shapesC[1,P]/np.sum(shapesC[1,:])+shapesC[2,P]/np.sum(shapesC[2,:]))
D_PI-=0.25*dt*wheightings*shapeOnEdge[i,2]*NmpC*cx*(shapesC[2,P]/np.sum(shapesC[2,:])+shapesC[3,P]/np.sum(shapesC[3,:]))
# Transverse contributions
if transverse:
D_PI+= (0.25*dt)**2*wheightings*shapeOnEdge[i,1]*NmpC*cx*cy*(shapesC[0,P]/np.sum(shapesC[0,:])+shapesC[1,P]/np.sum(shapesC[1,:]))
D_PI+= (0.25*dt)**2*wheightings*shapeOnEdge[i,2]*NmpC*cx*cy*(shapesC[0,P]/np.sum(shapesC[0,:])+shapesC[3,P]/np.sum(shapesC[3,:]))
Res+=np.abs(D_PI)
## Contributions of material points of left cell
for P in range(NmpL):
D_PI=0.
for i in range(Nnodes):
wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
## edge 4 contribution
D_PI+= 0.25*dt*wheightings*shapeOnEdge[i,3]*NmpC*cx*(shapesL[2,P]/np.sum(shapesL[2,:])+shapesL[3,P]/np.sum(shapesL[3,:]))
if transverse:
D_PI-=(0.25*dt)**2*wheightings*shapeOnEdge[i,3]*NmpC*cx*cy*(shapesL[0,P]/np.sum(shapesL[0,:])+shapesL[1,P]/np.sum(shapesL[1,:]))
## edge 3 contribution
D_PI-=(0.25*dt)**2*wheightings*shapeOnEdge[i,2]*NmpC*cy*cx*(shapesL[1,P]/np.sum(shapesL[1,:])+shapesL[2,P]/np.sum(shapesL[2,:]))
Res+=np.abs(D_PI)
## Contributions of material points of bottom cell
for P in range(NmpB):
D_PI=0.
for i in range(Nnodes):
wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
## edge 1 contribution
D_PI+= 0.25*dt*wheightings*shapeOnEdge[i,0]*NmpC*cy*(shapesB[2,P]/np.sum(shapesB[2,:])+shapesB[3,P]/np.sum(shapesB[3,:]))
if transverse:
D_PI-=(0.25*dt)**2*wheightings*shapeOnEdge[i,0]*NmpC*cy*cx*(shapesB[0,P]/np.sum(shapesB[0,:])+shapesB[3,P]/np.sum(shapesB[3,:]))
## edge 2 contribution
D_PI-=(0.25*dt)**2*wheightings*shapeOnEdge[i,1]*NmpC*cx*cy*(shapesB[2,P]/np.sum(shapesB[2,:])+shapesB[3,P]/np.sum(shapesB[3,:]))
Res+=np.abs(D_PI)
## Contributions of material points of bottom-left cell
for P in range(NmpBL):
D_PI=0.
for i in range(Nnodes):
wheightings=shapesC[i,point]/np.sum(shapesC[i,:])
## edge 1 contribution
D_PI+=(0.25*dt)**2*wheightings*shapeOnEdge[i,0]*NmpC*cy*cx*(shapesBL[1,P]/np.sum(shapesBL[1,:])+shapesBL[2,P]/np.sum(shapesBL[2,:]))
## edge 4 contribution
D_PI+=(0.25*dt)**2*wheightings*shapeOnEdge[i,3]*NmpC*cx*cy*(shapesBL[2,P]/np.sum(shapesBL[2,:])+shapesBL[3,P]/np.sum(shapesBL[3,:]))
Res+=np.abs(D_PI)
Residual = lambdify((dt),Res-1.)
return Residual
def rootFinder(function,tol=1.e-12):
NiterMax=1000
    # Find the biggest root of the residual by a dichotomy (bisection) algorithm
a0=function(0.)
a1=function(1.)
it=0
#pdb.set_trace()
if a0*a1>tol:
print "No solution can be found within the [0,1]"
while (a1-a0)>tol:
it+=1
a2=0.5*(a0+a1)
if function(a2)<1.e-7:
a0=a2
else:
a1=a2
if (a1-a0)<tol:
return a0
if it == NiterMax :
print "Solution not converged yet"
return a0
def gridSearch(function,tol=1.e-7):
samples=500000
    # Find the biggest root of the residual by a grid search algorithm
CFL=np.linspace(0.,1.,samples)
for i in CFL:
if i==samples-1:return i
a0=function(i)
if a0<tol:
continue
else:
return i
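# Hedged usage sketch (added for illustration; not in the original script):
# gridSearch returns the first grid point at which a monotone residual stops
# being negative, i.e. an approximation of its root.  The toy residual below
# is an assumption made purely to demonstrate the call.
_toy_residual = lambda dt: dt - 0.35
assert abs(gridSearch(_toy_residual) - 0.35) < 1.e-3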
cx=20.;cy=20.
print "Speeds: cx/cy=",cx/cy
############### 1PPC
print "**************************************************************"
print "****************** 1PPC discretization **********************"
print "**************************************************************"
# Local coordinates of material points in current element
Xp=np.array([0.])
Yp=np.array([0.])
solution=optimize.fsolve(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution DCU is: ",CFL,cx*solution/2. + cy*solution/2.
Residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp))
CFL=np.linspace(0.,1.,100)
res=np.zeros(len(CFL))
for i in range(len(CFL)):
res[i]=Residual(2.*CFL[i]/max(cx,cy))
plt.plot(CFL,res)
plt.grid()
plt.show()
Residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solution=optimize.root(Residual,1.,method='hybr',options={'xtol':1.e-12}).x
# print solution
solution=optimize.fsolve(Residual,1.)
CFL=max(cx,cy)*solution/2.
print "Solution CTU is: ",CFL
CFL=np.linspace(0.,1.,100)
res=np.zeros(len(CFL))
for i in range(len(CFL)):
res[i]=Residual(2.*CFL[i]/max(cx,cy))
plt.plot(CFL,res)
plt.grid()
plt.show()
pdb.set_trace()
cx=20.;cy=20.
print "Speeds: cx/cy=",cx/cy
# ############### 2PPC
# print "**************************************************************"
# print "****************** 2PPC discretization **********************"
# print "**************************************************************"
# print "=== Symmetric horizontal ==="
# Xp=np.array([-0.5,0.5])
# Yp=np.array([0.,0.])
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution DCU is: ",CFL, " (Newton ", 0.5*max(cx,cy)*solutionN,")"
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solution=gridSearch(residual)
# solutionN=optimize.newton(residual,1.)
# CFL=(max(cx,cy)*solution/2.)
# print "Solution CTU is: ",CFL, " (Newton ",0.5*max(cx,cy)*solutionN,")"
# print " "
# print "=== Symmetric vertical ==="
# Yp=np.array([-0.5,0.5])
# Xp=np.array([0.,0.])
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution DCU is: ",CFL, " (Newton ", 0.5*max(cx,cy)*solutionN,")"
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(residual,1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution CTU is: ",CFL, " (Newton ",0.5*max(cx,cy)*solutionN,")"
# print " "
# print "=== Shifted ==="
# print "=== Symmetric horizontal ==="
# Xp=np.array([-0.25,0.25])
# Yp=np.array([0.,0.])
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(residual,1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution DCU is: ",CFL, " (Newton ", 0.5*max(cx,cy)*solutionN,")"
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(residual,1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution CTU is: ",CFL, " (Newton ",0.5*max(cx,cy)*solutionN,")"
# print " "
# print "=== Symmetric vertical ==="
# Yp=np.array([-0.25,0.25])
# Xp=np.array([0.,0.])
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(residual,1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution DCU is: ",CFL, " (Newton ", 0.5*max(cx,cy)*solutionN,")"
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solutionN=optimize.newton(residual,1.)
# solution=gridSearch(residual)
# CFL=max(cx,cy)*solution/2.
# print "Solution CTU is: ",CFL, " (Newton ",0.5*max(cx,cy)*solutionN,")"
# CFL=np.linspace(0.,1.,10000)
# res=np.zeros(len(CFL))
# for i in range(len(CFL)):
# res[i]=residual(2.*CFL[i]/max(cx,cy))
# plt.plot(CFL,res,label='residual')
# plt.plot([0.5*max(cx,cy)*solution,0.5*max(cx,cy)*solution],[0,max(res)],'g',label='grid search')
# plt.plot([0.5*max(cx,cy)*solutionN,0.5*max(cx,cy)*solutionN],[0,max(res)],'r',label='Newton')
# plt.legend()
# plt.grid()
# plt.show()
############### 4PPC
print "**************************************************************"
print "****************** 4PPC discretization **********************"
print "**************************************************************"
print "=== Symmetric ==="
# Xp=np.array([-0.25,0.25,0.25,-0.25])
# Yp=np.array([-0.25,-0.25,0.25,0.25])
# solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
# CFL=max(cx,cy)*solution/2.
# print "Solution DCU is: ",CFL
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
# CFL=max(cx,cy)*solution/2.
# print "Solution CTU is: ",CFL
# print "=== Shiffted symmetrically ==="
# Xp=np.array([-0.5,0.5,0.5,-0.5])
# Yp=np.array([-0.5,-0.5,0.5,0.5])
# solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
# CFL=max(cx,cy)*solution/2.
# print "Solution DCU is: ",CFL
# residual=symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp))
# solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
# CFL=max(cx,cy)*solution/2.
# print "Solution CTU is: ",CFL
cx=20.;cy=2.
print "Speeds: cx/cy=",cx/cy
print " "
print "=== Shiffted right ==="
shift=+0.25
Xp=np.array([-0.25,0.25,0.25,-0.25])+shift
Yp=np.array([-0.25,-0.25,0.25,0.25])
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution DCU is: ",CFL
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution CTU is: ",CFL
print " "
print "=== Shiffted above ==="
Xp=np.array([-0.25,0.25,0.25,-0.25])
Yp=np.array([-0.25,-0.25,0.25,0.25])+shift
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution DCU is: ",CFL
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution CTU is: ",CFL
print " "
print "=== Shiffted left ==="
shift=-0.25
Xp=np.array([-0.25,0.25,0.25,-0.25])+shift
Yp=np.array([-0.25,-0.25,0.25,0.25])
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution DCU is: ",CFL
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution CTU is: ",CFL
print " "
print "=== Shiffted below ==="
Xp=np.array([-0.25,0.25,0.25,-0.25])
Yp=np.array([-0.25,-0.25,0.25,0.25])+shift
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution DCU is: ",CFL
solution=optimize.newton(symbolResidual(0,cx,cy,(Xp,Yp),(Xp,Yp),(Xp,Yp),(Xp,Yp)),1.)
CFL=max(cx,cy)*solution/2.
print "Solution CTU is: ",CFL
| 35.597122
| 149
| 0.575788
|
7aebea7248e052897061da4c796e9ac751a4f369
| 709
|
py
|
Python
|
ground/core/robust.py
|
lycantropos/ground
|
ef6f54b8cb555af8d9202d621cac57a892ecb78d
|
[
"MIT"
] | 4
|
2021-05-15T19:15:56.000Z
|
2021-11-30T06:19:47.000Z
|
ground/core/robust.py
|
lycantropos/ground
|
ef6f54b8cb555af8d9202d621cac57a892ecb78d
|
[
"MIT"
] | null | null | null |
ground/core/robust.py
|
lycantropos/ground
|
ef6f54b8cb555af8d9202d621cac57a892ecb78d
|
[
"MIT"
] | null | null | null |
from shewchuk import Expansion
from .hints import Scalar
from .primitive import square
def to_cross_product(first_x: Scalar,
first_y: Scalar,
second_x: Scalar,
second_y: Scalar) -> Expansion:
"""
Returns expansion of vectors' cross product.
"""
return Expansion(first_x) * second_y - Expansion(second_x) * first_y
def to_squared_points_distance(first_x: Scalar,
first_y: Scalar,
second_x: Scalar,
second_y: Scalar) -> Expansion:
return (square(Expansion(first_x, -second_x))
+ square(Expansion(first_y, -second_y)))
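# Hedged illustration (added; not part of the original module): the same cross
# product formula with plain floats, for intuition only -- the functions above
# go through shewchuk's Expansion so that the sign of the result stays exact
# where naive floating point could round it away.
def _naive_cross_product(first_x: float,
                         first_y: float,
                         second_x: float,
                         second_y: float) -> float:
    return first_x * second_y - second_x * first_y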
| 30.826087
| 72
| 0.569817
|
4d032bf0645e5a2e57a859a4ccbd40555d55aee7
| 2,741
|
py
|
Python
|
post_quant/accuracy_test.py
|
qinjian623/pytorch_toys
|
7f4761bddc65282ea31a2d0f9eb146772276dd7c
|
[
"MIT"
] | 56
|
2019-12-03T15:58:01.000Z
|
2022-03-31T07:05:48.000Z
|
post_quant/accuracy_test.py
|
qinjian623/pytorch_toys
|
7f4761bddc65282ea31a2d0f9eb146772276dd7c
|
[
"MIT"
] | 1
|
2021-07-22T08:13:46.000Z
|
2022-03-24T13:23:35.000Z
|
post_quant/accuracy_test.py
|
qinjian623/pytorch_toys
|
7f4761bddc65282ea31a2d0f9eb146772276dd7c
|
[
"MIT"
] | 18
|
2020-05-12T02:17:58.000Z
|
2022-03-31T07:05:47.000Z
|
# Borrowed from examples
import torch
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def validate(val_loader, model,
shut_up=False,
criterion=None, half=False):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if torch.cuda.is_available():
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if half:
input = input.half()
# compute output
output = model(input)
loss = criterion(output, target) if criterion else None
            # measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item() if loss else 0, input.size(0))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 10 == 0 and not shut_up:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
if not shut_up:
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
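# Hedged usage sketch (added for illustration; not in the original script):
# exercising accuracy() on random logits.  The class count and batch size
# below are illustrative assumptions.
def _accuracy_smoke_test(num_classes=10, batch_size=32):
    logits = torch.randn(batch_size, num_classes)
    targets = torch.randint(0, num_classes, (batch_size,))
    top1, top5 = accuracy(logits, targets, topk=(1, 5))
    # each entry is a one-element tensor holding the percentage for that k
    assert 0.0 <= top1.item() <= top5.item() <= 100.0
    return top1, top5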
| 30.455556
| 88
| 0.530463
|
8bb88405ae47d47725673fbdb524d3dc4a1dac35
| 759
|
py
|
Python
|
odeExercise/ode.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
odeExercise/ode.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
odeExercise/ode.py
|
terasakisatoshi/pythonCodes
|
baee095ecee96f6b5ec6431267cdc6c40512a542
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as plt
# solve ODE y^{(maxdeg)}=F(x,y',\dots,y^{(maxdeg-1)})
maxdeg = 2
x_init = 0.0
# y(0)=0,y'(0)=-1.0
y0_init, y1_init = 0.0, -1.0
# F=-y
F = lambda x, y: -y[0]
# set initial value
x = x_init
y = np.array([y0_init, y1_init])
# create tmp array to define fv
fs = [lambda x, y: y[k+1] for k in range(maxdeg-1)]
fs.append(lambda x, y: F(x, y))
fv = lambda x, y: np.array([fs[k](x, y) for k in range(len(fs))])
# apply Heun method
phi = lambda x, y: (fv(x, y) + fv(x+h, y+h*fv(x, y)))/2.0
maxiterator = 1000
h = 0.1
xplt, ys = [], []
for i in range(maxiterator):
xplt.append(x)
ys.append(y.copy())
    y += h*phi(x, y)
    x += h
yplt = np.array(ys).transpose()[0]
plt.plot(xplt, yplt)
plt.show()
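# Hedged check (added for illustration; not in the original script): with
# y(0)=0 and y'(0)=-1 the exact solution of y''=-y is y(x) = -sin(x), and the
# Heun iterates should track it closely over the first few periods.
_xarr = np.array(xplt)
assert np.allclose(yplt[_xarr < 10.0], -np.sin(_xarr[_xarr < 10.0]), atol=5e-2)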
| 21.083333
| 65
| 0.594203
|
f5df20a3535c9829ebf6fd3e6f67d0fb49a18534
| 422
|
py
|
Python
|
leetcode/editor/practice/skip_list.py
|
KAIKAIZHANG/Algorithm
|
755547dea7f055919abfe9165279fc08e120b75d
|
[
"MIT"
] | 1
|
2019-03-01T09:00:40.000Z
|
2019-03-01T09:00:40.000Z
|
leetcode/editor/practice/skip_list.py
|
KAIKAIZHANG/Algorithm
|
755547dea7f055919abfe9165279fc08e120b75d
|
[
"MIT"
] | null | null | null |
leetcode/editor/practice/skip_list.py
|
KAIKAIZHANG/Algorithm
|
755547dea7f055919abfe9165279fc08e120b75d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Skip list implementation
"""
import random
from typing import Optional
class ListNode:
def __init__(self, data: Optional[int] = None):
self._data = data
self._forwards = [] # Forward pointers
class SkipList:
_MAX_LEVEL = 16
def __init__(self):
self._level_count = 1
self._head = ListNode()
self._head._forwards = [None]*type(self)._MAX_LEVEL
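    # Hedged sketch (added for illustration; not in the original file): a
    # typical way to draw the level of a newly inserted node -- each extra
    # level is kept with probability p, capped at _MAX_LEVEL.
    def _random_level(self, p=0.5):
        level = 1
        while random.random() < p and level < type(self)._MAX_LEVEL:
            level += 1
        return level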
| 15.62963
| 59
| 0.599526
|
f5840223c98f7a77cbebcbe0f617f0bbe1a0c153
| 216
|
py
|
Python
|
lectures/scripts-DATA/physics/mean.py
|
cmsc6950-spring-2018/CMSC6950.github.io
|
9dcbddc30c0df1550f796a836f956aaaa091573a
|
[
"CC-BY-4.0"
] | null | null | null |
lectures/scripts-DATA/physics/mean.py
|
cmsc6950-spring-2018/CMSC6950.github.io
|
9dcbddc30c0df1550f796a836f956aaaa091573a
|
[
"CC-BY-4.0"
] | null | null | null |
lectures/scripts-DATA/physics/mean.py
|
cmsc6950-spring-2018/CMSC6950.github.io
|
9dcbddc30c0df1550f796a836f956aaaa091573a
|
[
"CC-BY-4.0"
] | 1
|
2020-05-23T00:59:08.000Z
|
2020-05-23T00:59:08.000Z
|
def mean(num_list):
if len(num_list) == 0 :
raise Exception("The algebraic mean of an empty list is undefined. \
Please provide a list of numbers")
else :
return sum(num_list)/len(num_list)
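if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not in the original file).
    assert mean([1, 2, 3, 4]) == 2.5
    try:
        mean([])
    except Exception as err:
        print("Caught expected error:", err)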
| 30.857143
| 74
| 0.657407
|
1abf09bbd521a2a521fffefa046029add9690aae
| 11,583
|
py
|
Python
|
features/steps/managers/kobiton_manager.py
|
lordkyzr/launchkey-python
|
4a6c13c2e60c5f38c4cb749d6a887eb1ac813c0c
|
[
"MIT"
] | 9
|
2017-10-12T02:45:23.000Z
|
2021-01-11T05:44:13.000Z
|
features/steps/managers/kobiton_manager.py
|
lordkyzr/launchkey-python
|
4a6c13c2e60c5f38c4cb749d6a887eb1ac813c0c
|
[
"MIT"
] | 31
|
2018-09-12T00:17:10.000Z
|
2022-01-31T21:35:04.000Z
|
features/steps/managers/kobiton_manager.py
|
lordkyzr/launchkey-python
|
4a6c13c2e60c5f38c4cb749d6a887eb1ac813c0c
|
[
"MIT"
] | 11
|
2017-01-31T21:45:29.000Z
|
2022-01-28T00:56:48.000Z
|
import requests
from time import sleep
class Version:
def __init__(self, id, state=None, version=None, native_properties=None, latest=None):
"""
Kobiton App Version.
Note that no values are required based on the spec so any value can
default to None.
        See: https://api.kobiton.com/docs/#app
:param id:
:param state:
:param version:
:param native_properties:
:param latest:
"""
self.id = id
self.state = state
self.version = version
self.native_properties = native_properties
self.latest = latest
class App:
def __init__(self, id, name=None, state=None, created_at=None, private_access=None, os=None, created_by=None,
bypass=None, organization_id=None, icon_url=None, versions=None):
"""
Kobiton app
Note that no values are required based on the spec so any value can
default to None.
See: https://api.kobiton.com/docs/#app
:param id:
:param name:
:param state:
:param created_at:
:param private_access:
:param os:
:param created_by:
:param bypass:
:param organization_id:
:param icon_url:
:param versions:
"""
self.id = id
self.name = name
self.state = state
self.created_at = created_at
self.private_access = private_access
self.os = os
self.created_by = created_by
self.bypass = bypass
self.organization_id = organization_id
self.icon_url = icon_url
self.versions = versions
def __repr__(self):
return "App <id={id}, name=\"{name}\", state=\"{state}\">".format(
id=self.id,
name=self.name,
state=self.state
)
class Device:
def __init__(self, id, udid, is_booked, is_hidden, is_online, model_name, device_name,
resolution, platform_name, platform_version, installed_browsers, support,
device_image_url, is_favorite, is_cloud, is_my_org, is_my_own, hosted_by):
"""
        Kobiton device
Note that no values are required based on the spec so any value can
default to None.
See: https://api.kobiton.com/docs/#clouddevice
:param id:
:param udid:
:param is_booked:
:param is_hidden:
:param is_online:
:param model_name:
:param device_name:
:param resolution:
:param platform_name:
:param platform_version:
:param installed_browsers:
:param support:
:param device_image_url:
:param is_favorite:
:param is_cloud:
:param is_my_org:
:param is_my_own:
:param hosted_by:
"""
self.id = id
self.udid = udid
self.is_booked = is_booked
self.is_hidden = is_hidden
self.is_online = is_online
self.model_name = model_name
self.device_name = device_name
self.resolution = resolution
self.platform_name = platform_name
self.platform_version = platform_version
self.installed_browser = installed_browsers
self.support = support
self.device_image_url = device_image_url
self.is_favorite = is_favorite
self.is_cloud = is_cloud
self.is_my_org = is_my_org
self.is_my_own = is_my_own
self.hosted_by = hosted_by
def __repr__(self):
return "Device <{device_name}>".format(device_name=self.device_name)
class KobitonManager:
def __init__(self, username, sdk_key, url='https://api.kobiton.com', api_version='v1'):
"""
Manager for interacting with Kobiton
        :param username: Kobiton username
:param sdk_key: Kobiton sdk key associated with the given username
:param url: Kobiton API url
:param api_version: Kobiton API version
"""
self.__username = username
self.__sdk_key = sdk_key
self.__url = url
self.__api_version = api_version
def _create_request(self, method, endpoint, json=None, data=None,
params=None):
"""
        Creates a request to the Kobiton API
:param method: HTTP method to use
:param endpoint: API endpoint to query IE: devices, sessions, user, app
:param json: Optional. JSON body data to include.
:param data: Optional. Dictionary, list of tuples, bytes, or file-like
object to send in the body.
:param params: Optional. GET parameters to include.
:return: Dictionary containing response data or boolean stating success
status if no data was returned.
"""
response = getattr(requests, method.lower())(
self.__url + "/" + self.__api_version + "/" + endpoint,
headers={
'Accept': 'application/json'
},
auth=(self.__username, self.__sdk_key),
data=data,
json=json,
params=params
)
response.raise_for_status()
return response.json() if response.text != "OK" else response.ok
def _generate_upload_url(self, filename):
"""
Generates an upload URL
https://api.kobiton.com/docs/#generate-upload-url
:param filename:
:return: Dictionary containing appPath and url (S3 bucket url).
"""
return self._create_request('post', 'apps/uploadUrl/', json={
"filename": filename
})
def _create_app(self, app_name, app_path):
"""
Creates an application to be accessed by Kobiton devices
https://api.kobiton.com/docs/#create-application-or-version
:param app_name: Designated app filename IE: my_app.apk
:param app_path: App path returned by the _generate_upload_url()
:return: Dictionary containing filename and appId keys
"""
return self._create_request('post', 'apps', json={
"filename": app_name,
"appPath": app_path
})
def _upload_app_to_s3(self, app_path, s3_url):
"""
Uploads a given app to a S3 url
:param app_path: Filepath to the app to be uploaded.
:param s3_url: S3 URL to upload to. This url should have been returned
by _generate_upload_url().
:return: None
"""
with open(app_path, 'rb') as f:
data = f.read()
response = requests.put(
s3_url,
data=data,
headers={
'Content-Type': 'application/octet-stream',
'x-amz-tagging': 'unsaved=true'
}
)
response.raise_for_status()
def get_apps(self):
"""
Get list of applications which were added to the Apps Repo.
https://api.kobiton.com/docs/#get-applications
:return: List of kobiton_manager.App objects.
"""
return [
App(
app['id'],
app['name'],
app['state'],
created_at=app.get('createdAt'),
private_access=app.get('privateAccess'),
os=app.get('os'),
created_by=app.get('createdBy'),
bypass=app.get('bypass'),
organization_id=app.get('organizationId'),
icon_url=app.get('iconUrl'),
versions=[
Version(
version['id'],
version['state'],
version['version'],
version['nativeProperties'],
version.get('latest')
) for version in app.get('versions', [])
]
) for app in self._create_request('get', 'apps').get('apps', [])
]
def get_app(self, app_id):
"""
Get information about an application.
https://api.kobiton.com/docs/#get-an-application
:param app_id: The ID to the app
:return: kobiton_manager.App object
"""
app = self._create_request('get', 'apps/%s' % app_id)
return App(
app['id'],
app['name'],
app['state'],
created_at=app.get('createdAt'),
private_access=app.get('privateAccess'),
os=app.get('os'),
created_by=app.get('createdBy'),
bypass=app.get('bypass'),
organization_id=app.get('organizationId'),
icon_url=app.get('iconUrl'),
versions=[
Version(
version['id'],
version['state'],
version['version'],
version['nativeProperties'],
version.get('latest')
) for version in app.get('versions', [])
]
)
def upload_app(self, app_path, app_name=None, retrieve_app_status=False):
"""
Uploads an application via Kobiton's application upload flow:
https://docs.kobiton.com/basic/app-repository/integrate-apps-repo-with-ci/
:param app_path: Filepath to the app to be uploaded.
:param app_name: Optional. App name to label the uploaded app as.
:param retrieve_app_status: Whether to pull the full app information
after upload. If not, an app with only id and the uploaded version id
will be returned.
:return: kobiton_manager.App object
"""
app_name = app_name if app_name else app_path.split("/")[-1]
upload_data = self._generate_upload_url(app_name)
self._upload_app_to_s3(app_path, upload_data['url'])
app = self._create_app(app_name, upload_data['appPath'])
if retrieve_app_status:
try:
app = self.get_app(app['appId'])
except requests.HTTPError:
# We seem to be getting a 500 if we query
# immediately after creating the app
sleep(2)
app = self.get_app(app['appId'])
else:
app = App(app['appId'], versions=[Version(app['versionId'])])
return app
def delete_app(self, app_id):
"""
Deletes a given APP ID from Kobiton
:param app_id:
:return:
"""
return self._create_request('delete', 'apps/%s' % app_id)
def get_devices(self):
"""
Retrieves a list of Kobiton devices
:return: List of kobiton_manager.Device objects
"""
response = self._create_request(
'get',
'devices'
)
return [
Device(
device.get('id'),
device.get('udid'),
device.get('isBooked'),
device.get('isHidden'),
device.get('isOnline'),
device.get('modelName'),
device.get('deviceName'),
device.get('resolution'),
device.get('platformName'),
device.get('platformVersion'),
device.get('installedBrowsers'),
device.get('support'),
device.get('deviceImageUrl'),
device.get('isFavorite'),
device.get('isCloud'),
device.get('isMyOrg'),
device.get('isMyOwn'),
device.get('hostedBy')
) for device in response['cloudDevices']
]
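# Hedged usage sketch (added for illustration; not part of the original
# module).  The credentials, file path and filtering below are placeholders,
# not real values or a required workflow.
def _example_upload_flow(username, sdk_key):
    manager = KobitonManager(username, sdk_key)
    app = manager.upload_app("builds/my_app.apk", retrieve_app_status=True)
    online_devices = [d for d in manager.get_devices() if d.is_online]
    return app, online_devices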
| 34.679641
| 113
| 0.555469
|
5990789e1af2d1b83eac8c7f3470da95965b40ab
| 444
|
py
|
Python
|
cog/migrations/0006_auto_20160303_1043.py
|
downiec/COG
|
cea8ceac701958b6af8e272698bfb08d89f62bf4
|
[
"BSD-3-Clause"
] | 6
|
2016-03-10T19:38:17.000Z
|
2021-02-23T09:34:59.000Z
|
cog/migrations/0006_auto_20160303_1043.py
|
downiec/COG
|
cea8ceac701958b6af8e272698bfb08d89f62bf4
|
[
"BSD-3-Clause"
] | 602
|
2015-01-05T16:30:08.000Z
|
2021-02-02T21:44:38.000Z
|
cog/migrations/0006_auto_20160303_1043.py
|
cedadev/COG
|
6167f9114c7cf0422b34fb9f5f3f07f9657a7dbe
|
[
"BSD-3-Clause"
] | 18
|
2015-02-12T15:50:17.000Z
|
2021-04-27T16:40:36.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-03 10:43
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cog', '0005_project_shared'),
]
operations = [
migrations.AlterField(
model_name='bookmark',
name='name',
field=models.CharField(max_length=1000),
),
]
| 21.142857
| 52
| 0.61036
|
f6c2497aff4a7407319dec0fb891e82376628440
| 38
|
py
|
Python
|
scvelo/pl.py
|
WeilerP/scvelo
|
1805ab4a72d3f34496f0ef246500a159f619d3a2
|
[
"BSD-3-Clause"
] | 272
|
2018-08-21T08:59:11.000Z
|
2022-03-30T11:24:19.000Z
|
scvelo/pl.py
|
theislab/scvelo
|
1805ab4a72d3f34496f0ef246500a159f619d3a2
|
[
"BSD-3-Clause"
] | 570
|
2018-08-21T14:04:03.000Z
|
2022-03-30T08:48:04.000Z
|
scvelo/pl.py
|
WeilerP/scvelo
|
1805ab4a72d3f34496f0ef246500a159f619d3a2
|
[
"BSD-3-Clause"
] | 105
|
2018-09-04T14:08:58.000Z
|
2022-03-17T16:20:14.000Z
|
from scvelo.plotting import * # noqa
| 19
| 37
| 0.736842
|
e69333cf688de2dce9b6041fb6294a1da7cc265c
| 17,324
|
py
|
Python
|
prereise/gather/griddata/transmission/geometry.py
|
lanesmith/PreREISE
|
d9003b042cef18a064d6eb689b1218b5533712e2
|
[
"MIT"
] | null | null | null |
prereise/gather/griddata/transmission/geometry.py
|
lanesmith/PreREISE
|
d9003b042cef18a064d6eb689b1218b5533712e2
|
[
"MIT"
] | null | null | null |
prereise/gather/griddata/transmission/geometry.py
|
lanesmith/PreREISE
|
d9003b042cef18a064d6eb689b1218b5533712e2
|
[
"MIT"
] | 1
|
2022-02-23T20:43:36.000Z
|
2022-02-23T20:43:36.000Z
|
import cmath
from dataclasses import dataclass, field
from itertools import combinations
from math import exp, log, pi, sqrt
from statistics import geometric_mean
from prereise.gather.griddata.transmission.const import (
epsilon_0,
mu_0,
relative_permeability,
resistivity,
)
from prereise.gather.griddata.transmission.helpers import (
DataclassWithValidation,
approximate_loadability,
get_standard_conductors,
)
@dataclass
class Conductor(DataclassWithValidation):
"""Represent a single conductor (which may be a stranded composite). Conductors can
be instantiated by either:
- looking them up via their standardized bird ``name``,
- passing the parameters relevant to impedance and rating calculations
(``radius``, ``gmr``, ``resistance_per_km``, and ``current_limit``), or
    - passing parameters which can be used to estimate parameters relevant to impedance
and rating calculations (``radius``, ``material``, and ``current_limit``). In this
case, a solid conductor is assumed.
:param str name: name of standard conductor.
:param float radius: outer radius of conductor.
:param str material: material of conductor. Used to calculate ``resistance_per_km``
and ``gmr`` if these aren't passed to the constructor, unnecessary otherwise.
:param float resistance_per_km: resistance (ohms) per kilometer. Will be estimated
from other parameters if it isn't passed.
:param float gmr: geometric mean radius of conductor. Will be estimated from
other parameters if it isn't passed.
:param float area: cross-sectional area of conductor. Will be estimated from
other parameters if it isn't passed.
"""
name: str = None
radius: float = None
material: str = None
resistance_per_km: float = None
gmr: float = None
area: float = None
permeability: float = None
current_limit: float = None
def __post_init__(self):
physical_parameters = {
self.radius,
self.material,
self.resistance_per_km,
self.gmr,
self.area,
self.permeability,
self.current_limit,
}
# Validate inputs
self.validate_input_types() # defined in DataclassWithValidation
if self.name is not None:
if any([a is not None for a in physical_parameters]):
raise TypeError("If name is specified, no other parameters can be")
self._get_parameters_from_standard_conductor_table()
else:
self._infer_missing_parameters()
def _infer_missing_parameters(self):
if self.gmr is None and (self.material is None):
raise ValueError(
"If gmr is not provided, material and radius are needed to estimate"
)
if self.resistance_per_km is None and self.material is None:
raise ValueError(
"If resistance_per_km is not provided, material is needed to estimate"
)
# Estimate missing inputs using the inputs which are present
if self.gmr is None:
try:
self.permeability = relative_permeability[self.material]
except KeyError:
raise ValueError(
f"Unknown permeability for {self.material}, can't calculate gmr"
)
self.gmr = self.radius * exp(self.permeability / 4)
if self.resistance_per_km is None:
try:
self.resistivity = resistivity[self.material]
except KeyError:
raise ValueError(
f"Unknown resistivity for {self.material}, "
"can't calculate resistance"
)
if self.area is None:
self.area = pi * self.radius**2
# convert per-m to per-km
self.resistance_per_km = self.resistivity * 1000 / self.area
def _get_parameters_from_standard_conductor_table(self):
standard_conductors = get_standard_conductors()
title_cased_name = self.name.title()
if title_cased_name not in standard_conductors.index:
raise ValueError(f"No conductor named '{self.name}' in standard table")
data = standard_conductors.loc[title_cased_name]
self.gmr = data["gmr_mm"] / 1e3
self.radius = data["radius_mm"] / 1e3
self.resistance_per_km = data["resistance_ac_per_km_75c"]
self.current_limit = data["max_amps"]
self.name = title_cased_name
@dataclass
class ConductorBundle(DataclassWithValidation):
"""Represent a bundle of conductors (or a 'bundle' of one).
:param int n: number of conductors in bundle (can be one).
:param Conductor conductor: information for each conductor.
:param float spacing: distance between the centers of each conductor (meters).
:param str layout: either 'circular' (conductors are arranged in a regular polygon
with edge length ``spacing``) or 'flat' (conductors are arranged in a line, at
regular spacing ``spacing``).
"""
conductor: Conductor
n: int = 1
spacing: float = None # we need to be able to ignore spacing for a single conductor
layout: str = "circular"
resistance_per_km: float = field(init=False)
spacing_L: float = field(init=False) # noqa: N815
spacing_C: float = field(init=False) # noqa: N815
current_limit: float = field(init=False, default=None)
def __post_init__(self):
self.validate_input_types() # defined in DataclassWithValidation
if self.n != 1 and self.spacing is None:
raise ValueError("With multiple conductors, spacing must be specified")
self.resistance_per_km = self.conductor.resistance_per_km / self.n
self.spacing_L = self.calculate_equivalent_spacing("inductance")
self.spacing_C = self.calculate_equivalent_spacing("capacitance")
if self.conductor.current_limit is not None:
self.current_limit = self.conductor.current_limit * self.n
def calculate_equivalent_spacing(self, type="inductance"):
if type == "inductance":
conductor_distance = self.conductor.gmr
elif type == "capacitance":
conductor_distance = self.conductor.radius
else:
raise ValueError("type must be either 'inductance' or 'capacitance'")
if self.n == 1:
return conductor_distance
elif self.n == 2:
return (conductor_distance * self.spacing) ** (1 / 2)
else:
if self.layout == "circular":
return self.calculate_equivalent_spacing_circular(conductor_distance)
if self.layout == "flat":
return self.calculate_equivalent_spacing_flat(conductor_distance)
raise ValueError(f"Unknown layout: {self.layout}")
def calculate_equivalent_spacing_circular(self, conductor_distance):
if self.n == 3:
return (conductor_distance * self.spacing**2) ** (1 / 3)
if self.n == 4:
return (conductor_distance * self.spacing**3 * 2 ** (1 / 2)) ** (1 / 4)
raise NotImplementedError(
"Geometry calculations are only implemented for 1 <= n <= 4"
)
def calculate_equivalent_spacing_flat(self, conductor_distance):
if self.n == 3:
return (conductor_distance * 2 * self.spacing**2) ** (1 / 3)
if self.n == 4:
return (conductor_distance * 12 * self.spacing**3) ** (1 / 8)
raise NotImplementedError(
"Geometry calculations are only implemented for 1 <= n <= 4"
)
@dataclass
class PhaseLocations(DataclassWithValidation):
"""Represent the locations of each conductor bundle on a transmission tower. Each of
``a``, ``b``, and ``c`` are the (x, y) location(s) of that phase's conductor(s).
:param tuple a: the (x, y) location of the single 'A' phase conductor if
``circuits`` == 1, or the ((x1, y1), (x2, y2), ...) locations of the 'A' phase
conductors if ``circuits`` > 1. Units are meters.
:param tuple b: the (x, y) location of the single 'B' phase conductor if
``circuits`` == 1, or the ((x1, y1), (x2, y2), ...) locations of the 'B' phase
conductors if ``circuits`` > 1. Units are meters.
:param tuple c: the (x, y) location of the single 'C' phase conductor if
``circuits`` == 1, or the ((x1, y1), (x2, y2), ...) locations of the 'C' phase
conductors if ``circuits`` > 1. Units are meters.
:param int circuits: the number of circuits on the tower.
"""
a: tuple
b: tuple
c: tuple
circuits: int = 1
equivalent_distance: float = field(init=False)
equivalent_height: float = field(init=False)
phase_self_distances: dict = field(init=False, default=None)
equivalent_reflected_distance: float = field(init=False)
def __post_init__(self):
self.validate_input_types() # defined in DataclassWithValidation
if not (len(self.a) == len(self.b) == len(self.c)):
raise ValueError("each phase location must have the same length")
if self.circuits == 1 and len(self.a) == 2:
# Single-circuit specified as (x, y) will be converted to ((x, y))
self.a = (self.a,)
self.b = (self.b,)
self.c = (self.c,)
self.calculate_distances()
def calculate_distances(self):
self.true_distance = {
"ab": _geometric_mean_euclidian(self.a, self.b),
"ac": _geometric_mean_euclidian(self.a, self.c),
"bc": _geometric_mean_euclidian(self.b, self.c),
}
# 'Equivalent' distances are geometric means
self.equivalent_distance = geometric_mean(self.true_distance.values())
self.equivalent_height = geometric_mean(
[self.a[0][1], self.b[0][1], self.c[0][1]]
)
if self.circuits == 1:
self.calculate_single_circuit_distances()
else:
self.calculate_multi_circuit_distances()
def calculate_single_circuit_distances(self):
# The distance bounced off the ground, or 'reflected', is used for
# single-circuit capacitance calculations
self.reflected_distance = {
"ab": _euclidian(self.a[0], (self.b[0][0], -self.b[0][1])), # a -> b'
"ac": _euclidian(self.a[0], (self.c[0][0], -self.c[0][1])), # a -> c'
"bc": _euclidian(self.b[0], (self.c[0][0], -self.c[0][1])), # b -> c'
}
self.equivalent_reflected_distance = geometric_mean(
self.reflected_distance.values()
)
def calculate_multi_circuit_distances(self):
self.phase_self_distances = [
geometric_mean(_euclidian(p0, p1) for p0, p1 in combinations(phase, 2))
for phase in (self.a, self.b, self.c)
]
        # Multi-circuit, so we assume a tall tower and neglect the impact of ground reflectance
self.equivalent_reflected_distance = 2 * self.equivalent_height
@dataclass
class Tower(DataclassWithValidation):
"""Given the geometry of a transmission tower and conductor bundle information,
estimate per-kilometer inductance, resistance, and shunt capacitance.
:param PhaseLocations locations: the locations of each conductor bundle.
:param ConductorBundle bundle: the parameters of each conductor bundle.
"""
locations: PhaseLocations
bundle: ConductorBundle
resistance: float = field(init=False)
inductance: float = field(init=False)
capacitance: float = field(init=False)
phase_current_limit: float = field(init=False, default=None)
def __post_init__(self):
self.validate_input_types() # defined in DataclassWithValidation
self.resistance = self.bundle.resistance_per_km / self.locations.circuits
self.inductance = self.calculate_inductance_per_km()
self.capacitance = self.calculate_shunt_capacitance_per_km()
if self.bundle.current_limit is not None:
self.phase_current_limit = (
self.bundle.current_limit * self.locations.circuits
)
def calculate_inductance_per_km(self):
denominator = _circuit_bundle_distances(
self.bundle.spacing_L, self.locations.phase_self_distances
)
inductance_per_km = (
mu_0 / (2 * pi) * log(self.locations.equivalent_distance / denominator)
)
return inductance_per_km
def calculate_shunt_capacitance_per_km(self):
denominator = _circuit_bundle_distances(
self.bundle.spacing_C, self.locations.phase_self_distances
)
capacitance_per_km = (2 * pi * epsilon_0) / (
log(self.locations.equivalent_distance / denominator)
- log(
self.locations.equivalent_reflected_distance
/ (2 * self.locations.equivalent_height)
)
)
return capacitance_per_km
@dataclass
class Line(DataclassWithValidation):
"""Given a Tower design, line voltage, and length, calculate whole-line impedances
and rating.
:param Tower tower: tower parameters (containing per-kilometer impedances).
:param int/float length: line length (kilometers).
:param int/float voltage: line voltage (kilovolts).
:param int/float freq: the system nominal frequency (Hz).
"""
tower: Tower
length: float
voltage: float
freq: float = 60.0
series_impedance_per_km: complex = field(init=False)
shunt_admittance_per_km: complex = field(init=False)
propogation_constant_per_km: complex = field(init=False)
surge_impedance: complex = field(init=False)
series_impedance: complex = field(init=False)
shunt_admittance: complex = field(init=False)
thermal_rating: float = field(init=False, default=None)
stability_rating: float = field(init=False, default=None)
power_rating: float = field(init=False, default=None)
def __post_init__(self):
# Convert integers to floats as necessary
for attr in ("freq", "length", "voltage"):
if isinstance(getattr(self, attr), int):
setattr(self, attr, float(getattr(self, attr)))
self.validate_input_types() # defined in DataclassWithValidation
# Calculate second-order electrical parameters which depend on frequency
omega = 2 * pi * self.freq
self.series_impedance_per_km = (
self.tower.resistance + 1j * self.tower.inductance * omega
)
self.shunt_admittance_per_km = 1j * self.tower.capacitance * omega
self.surge_impedance = cmath.sqrt(
self.series_impedance_per_km / self.shunt_admittance_per_km
)
self.propogation_constant_per_km = cmath.sqrt(
self.series_impedance_per_km * self.shunt_admittance_per_km
)
self.surge_impedance_loading = (
self.tower.locations.circuits
* self.voltage**2
/ abs(self.surge_impedance)
)
# Calculate loadability (depends on length)
if self.tower.phase_current_limit is not None:
self.thermal_rating = (
self.voltage * self.tower.phase_current_limit * sqrt(3) / 1e3 # MW
)
self.stability_rating = (
self.tower.locations.circuits
* approximate_loadability(self.length)
* self.surge_impedance_loading
)
self.power_rating = min(self.thermal_rating, self.stability_rating)
# Use the long-line transmission model to calculate lumped-element parameters
self.series_impedance = (self.series_impedance_per_km * self.length) * (
cmath.sinh(self.propogation_constant_per_km * self.length)
/ (self.propogation_constant_per_km * self.length)
)
self.shunt_admittance = (self.shunt_admittance_per_km * self.length) * (
cmath.tanh(self.propogation_constant_per_km * self.length / 2)
/ (self.propogation_constant_per_km * self.length / 2)
)
def _euclidian(a, b):
"""Calculate the euclidian distance between two points."""
try:
if len(a) != len(b):
raise ValueError("Length of a and b must be equivalent")
except TypeError:
raise TypeError(
"a and b must both be iterables compatible with the len() function"
)
return sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))
def _geometric_mean_euclidian(a_list, b_list):
"""Calculate the geometric mean euclidian distance between two coordinate lists."""
try:
if len(a_list) != len(b_list):
raise ValueError("Length of a_list and b_list must be equivalent")
except TypeError:
raise TypeError("a_list and b_list must both be iterables")
return geometric_mean(_euclidian(a, b) for a in a_list for b in b_list)
def _circuit_bundle_distances(bundle_distance, phase_distances=None):
"""Calculate characteristic distance of bundle and circuit distances."""
if phase_distances is None:
return bundle_distance
phase_characteristic_distances = [
sqrt(phase * bundle_distance) for phase in phase_distances
]
return geometric_mean(phase_characteristic_distances)
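# Hedged usage sketch (added for illustration; not part of the original
# module).  The conductor parameters, tower geometry, line length and voltage
# below are illustrative assumptions, not recommended design values.
def _example_line():
    conductor = Conductor(radius=0.0175, gmr=0.0136,
                          resistance_per_km=0.07, current_limit=900.0)
    bundle = ConductorBundle(conductor=conductor, n=2, spacing=0.45)
    locations = PhaseLocations(a=(-10.0, 30.0), b=(0.0, 30.0), c=(10.0, 30.0))
    tower = Tower(locations=locations, bundle=bundle)
    return Line(tower=tower, length=150.0, voltage=345.0)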
| 42.565111
| 88
| 0.648984
|
d943e5465c459c0279dae55fb74ef070bb61050d
| 1,566
|
py
|
Python
|
server.py
|
mgrabina/galoonline
|
f7ce7c366d7662a0d984f123ecc565fb70dd36e1
|
[
"MIT"
] | null | null | null |
server.py
|
mgrabina/galoonline
|
f7ce7c366d7662a0d984f123ecc565fb70dd36e1
|
[
"MIT"
] | null | null | null |
server.py
|
mgrabina/galoonline
|
f7ce7c366d7662a0d984f123ecc565fb70dd36e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import threading
import socket
from concurrent.futures.thread import ThreadPoolExecutor
from src import tcp_server
from src import logic
def client_handler(client: socket.socket):
logic.menu(client)
while True:
msg = tcp_server.recv_msg(client)
if client is None:
return
if msg is not None:
msg = msg.split()
# Validate Command
if logic.validate_command(msg):
tcp_server.send_msg(client, logic.error_msg_prefix + "Invalid command")
continue
# Validate Registry
if logic.validate_registration(client, msg) and 'exit' not in msg:
tcp_server.send_msg(client, logic.error_msg_prefix + "Please register first. \n")
continue
# Then execute game action
try:
threading.Thread(target=logic.commands_handler.get(msg[0].lower()), args=(client, msg,)).start()
except:
if client is None or client.fileno() == -1:
break
tcp_server.send_msg(client, "There was a problem procesing your request\n")
server = None
def main():
server = tcp_server.start_server()
while True:
try:
connection = tcp_server.connect(server)
if connection is None:
raise SystemExit
tcp_server.start_handler(client_handler, connection)
except (KeyboardInterrupt, SystemExit):
tcp_server.end_server(server)
print('Ending Server. Goodbye.')
raise
main()
| 28.472727
| 108
| 0.619413
|
dc8f4480eb574ed98538c2a16befd555262d0d8a
| 1,662
|
py
|
Python
|
bin/musicboards_scraper.py
|
kemskems/otdet
|
a5e52e2d5ab1aea2f1b63676c87a051f187567da
|
[
"MIT"
] | 1
|
2015-10-03T18:20:25.000Z
|
2015-10-03T18:20:25.000Z
|
bin/musicboards_scraper.py
|
kemskems/otdet
|
a5e52e2d5ab1aea2f1b63676c87a051f187567da
|
[
"MIT"
] | null | null | null |
bin/musicboards_scraper.py
|
kemskems/otdet
|
a5e52e2d5ab1aea2f1b63676c87a051f187567da
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import argparse
import os
import os.path
from bs4 import BeautifulSoup
import requests
def post_filter(tag):
"""Filter tag containing post."""
if tag.name != 'blockquote':
return False
if not tag.has_attr('class'):
return False
if isinstance(tag['class'], str):
return tag['class'] == 'postcontent'
else:
return 'postcontent' in tag['class'] and \
'lastedited' not in tag['class']
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Scrape Musicboards forum')
parser.add_argument('id', type=str, help='Thread ID')
parser.add_argument('-p', '--page', type=int, default=1,
help='Page number')
parser.add_argument('-o', '--outdir', type=str, help='Output directory')
parser.add_argument('-n', '--number', type=int, default=0,
help='Start post number')
args = parser.parse_args()
url = 'http://www.musicboards.com/showthread.php/{}/page{}'
url = url.format(args.id, args.page)
r = requests.get(url)
if r.status_code == 200:
# Create save directory
if args.outdir is not None:
savedir = os.path.join(args.outdir, args.id)
else:
savedir = args.id
os.makedirs(savedir, exist_ok=True)
soup = BeautifulSoup(r.text)
for i, post in enumerate(soup.find_all(post_filter)):
num = i + args.number
savefile = os.path.join(savedir, 'post-{}.txt'.format(num))
with open(savefile, 'w') as fout:
print('\n'.join(list(post.stripped_strings)), file=fout)
| 33.24
| 76
| 0.600481
|
53103b18e1c00d75bb5958931d4fa0928a4d89ce
| 1,314
|
py
|
Python
|
Test/Misc/generator_close.py
|
rch/pyke-1.1.1
|
e399b06f0c655eb6baafebaed09b4eb8f9c44b82
|
[
"MIT"
] | 76
|
2015-04-20T12:10:25.000Z
|
2021-11-27T20:26:27.000Z
|
Test/Misc/generator_close.py
|
w-simon/pyke
|
cfe95d8aaa06de123264f9b7f5bea20eb5924ecd
|
[
"MIT"
] | 2
|
2016-03-09T14:33:27.000Z
|
2018-10-22T11:25:49.000Z
|
Test/Misc/generator_close.py
|
w-simon/pyke
|
cfe95d8aaa06de123264f9b7f5bea20eb5924ecd
|
[
"MIT"
] | 42
|
2015-03-16T13:11:30.000Z
|
2022-02-12T14:45:48.000Z
|
# generator_close.py
import sys
class Backup(Exception): pass
def gen1(name, n):
try:
try:
for i in range(n): yield i
except Exception, e:
print "gen1(%s) => %s: %s" % (name, e.__class__.__name__, str(e))
raise
finally:
print "gen1(%s) done" % name
def gen2(name):
try:
try:
for i in gen1("first", 5):
for j in gen_exception("second"): yield i, j
except Exception, e:
print "gen2(%s) => %s: %s" % (name, e.__class__.__name__, str(e))
raise
finally:
print "gen2(%s) done" % name
def gen_exception(name):
try:
try:
for i in gen1("inner", 5):
if i == 2:
print "gen_exception(%s): raising exception" % name
raise Backup("gen_exception(%s): hello bob" % name)
yield i
except Exception, e:
print "gen_exception(%s) => %s: %s" % \
(name, e.__class__.__name__, str(e))
raise
finally:
print "gen_exception(%s) done" % name
def test():
for i, x in enumerate(gen2("top")):
print "got", x
if i == 3:
print "test: raising exception"
raise Backup("test: hello bob")
| 26.816327
| 77
| 0.487062
|
6144ac8ed1ce668c7ef5a3213525d22c83442ed1
| 2,566
|
py
|
Python
|
Pseudocode/Tests/tests.py
|
ashtonhess/augurFork4320Group1
|
642f3df9480d50bf6e1908ad37a296e73ae6c3b5
|
[
"MIT"
] | null | null | null |
Pseudocode/Tests/tests.py
|
ashtonhess/augurFork4320Group1
|
642f3df9480d50bf6e1908ad37a296e73ae6c3b5
|
[
"MIT"
] | null | null | null |
Pseudocode/Tests/tests.py
|
ashtonhess/augurFork4320Group1
|
642f3df9480d50bf6e1908ad37a296e73ae6c3b5
|
[
"MIT"
] | null | null | null |
#SPDX-License-Identifier: MIT
"""
Metrics that provides data about contributors & their associated activity
"""
import datetime
import sqlalchemy as s
import pandas as pd
from augur.util import register_metric
@register_metric()
def issues_first_time_opened(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
"""
Returns a timeseries of the count of persons opening an issue for the first time.
:param repo_id: The repository's id
:param repo_group_id: The repository's group id
:param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
:param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
:param end_date: Specifies the end date, defaults to datetime.now()
:return: DataFrame of persons/period
"""
if not begin_date:
begin_date = '1970-1-1 00:00:01'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
#found on line 356 of contributors.py below
if repo_id:
linesChangedByAuthorSQL = s.sql.text("""
SELECT cmt_author_email, date_trunc('week', cmt_author_date::date) as cmt_author_date, cmt_author_affiliation as affiliation,
SUM(cmt_added) as additions, SUM(cmt_removed) as deletions, SUM(cmt_whitespace) as whitespace, repo_name
FROM commits JOIN repo ON commits.repo_id = repo.repo_id
WHERE commits.repo_id = :repo_id
GROUP BY commits.repo_id, date_trunc('week', cmt_author_date::date), cmt_author_affiliation, cmt_author_email, repo_name
ORDER BY date_trunc('week', cmt_author_date::date) ASC;
""")
results = pd.read_sql(linesChangedByAuthorSQL, self.database, params={"repo_id": repo_id})
return results
else:
linesChangedByAuthorSQL = s.sql.text("""
SELECT cmt_author_email, date_trunc('week', cmt_author_date::date) as cmt_author_date, cmt_author_affiliation as affiliation,
SUM(cmt_added) as additions, SUM(cmt_removed) as deletions, SUM(cmt_whitespace) as whitespace
FROM commits
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
GROUP BY repo_id, date_trunc('week', cmt_author_date::date), cmt_author_affiliation, cmt_author_email
ORDER BY date_trunc('week', cmt_author_date::date) ASC;
""")
results = pd.read_sql(linesChangedByAuthorSQL, self.database, params={"repo_group_id": repo_group_id})
return results
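# Usage sketch (hedged): how a registered metric like this is typically called on an
# Augur metrics object with a configured `self.database`; the IDs are placeholders.
#   df = metrics.issues_first_time_opened(repo_group_id=20, repo_id=25430, period='week')
# Note that, as the inline comment above says, the SQL actually aggregates lines
# changed per author rather than first-time issue openers.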
| 49.346154
| 137
| 0.698753
|
07048cf68353a018aaba0c3123f0bc948bc7bd9b
| 1,026
|
py
|
Python
|
scenarios/scenario_zone_estimation.py
|
rahowa/workzone
|
b6fd3241fdbc9463e0e7eb863f82f9524be50830
|
[
"MIT"
] | 1
|
2020-04-25T07:49:11.000Z
|
2020-04-25T07:49:11.000Z
|
scenarios/scenario_zone_estimation.py
|
rahowa/workzone
|
b6fd3241fdbc9463e0e7eb863f82f9524be50830
|
[
"MIT"
] | null | null | null |
scenarios/scenario_zone_estimation.py
|
rahowa/workzone
|
b6fd3241fdbc9463e0e7eb863f82f9524be50830
|
[
"MIT"
] | 1
|
2020-04-23T10:24:56.000Z
|
2020-04-23T10:24:56.000Z
|
import cv2
import requests
import numpy as np
def main_zone_estimation(address: str) -> None:
route = "workzone"
test_url = f"{address}/{route}"
content_type = 'image/jpeg'
headers = {'content-type': content_type}
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
while True:
ret, frame = cap.read()
if not ret:
break
_, image_to_send = cv2.imencode('.jpg', frame)
response = requests.post(test_url,
                                 data=image_to_send.tobytes(),  # tostring() is deprecated in NumPy; tobytes() is equivalent
headers=headers)
zone_polygon = response.json()["workzone"]
zone_polygon = np.array(zone_polygon).reshape(-1, 1, 2)
zone_polygon = np.clip(zone_polygon, 0, np.inf)
frame = cv2.polylines(frame, [np.int32(zone_polygon)], True, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imshow("test case", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
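# Usage sketch (hedged): point the client at a running workzone service; the address
# below is a placeholder, the "/workzone" route comes from `route` above.
#   main_zone_estimation("http://localhost:5000")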
| 29.314286
| 97
| 0.569201
|
406f1e6e1ab452ba24eade1291abbfe974f128f0
| 8,546
|
py
|
Python
|
jina/parsers/helper.py
|
Akshat-unt/jina
|
b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717
|
[
"Apache-2.0"
] | 1
|
2021-12-18T06:54:49.000Z
|
2021-12-18T06:54:49.000Z
|
jina/parsers/helper.py
|
Akshat-unt/jina
|
b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717
|
[
"Apache-2.0"
] | 2
|
2021-12-17T15:22:12.000Z
|
2021-12-18T07:19:06.000Z
|
jina/parsers/helper.py
|
Akshat-unt/jina
|
b0b058f99f3ee4dcbcbbf2acbf04c5d7e7e9c717
|
[
"Apache-2.0"
] | null | null | null |
"""Module for helper functions in the parser"""
import argparse
import os
import warnings
from typing import Tuple, List
_SHOW_ALL_ARGS = 'JINA_FULL_CLI' in os.environ
if _SHOW_ALL_ARGS:
from jina.logging.predefined import default_logger
default_logger.warning(
f'Setting {_SHOW_ALL_ARGS} will make remote Peas with sharding not work when using JinaD'
)
def add_arg_group(parser, title):
"""Add the arguments for a specific group to the parser
:param parser: the parser configure
:param title: the group name
:return: the new parser
"""
return parser.add_argument_group(f'{title} arguments')
class KVAppendAction(argparse.Action):
"""argparse action to split an argument into KEY=VALUE form
on the first = and append to a dictionary.
This is used for setting up --env
"""
def __call__(self, parser, args, values, option_string=None):
"""
call the KVAppendAction
.. # noqa: DAR401
:param parser: the parser
:param args: args to initialize the values
:param values: the values to add to the parser
:param option_string: inherited, not used
"""
import json, re
from ..helper import parse_arg
d = getattr(args, self.dest) or {}
for value in values:
try:
d.update(json.loads(value))
except json.JSONDecodeError:
try:
k, v = re.split(r'[:=]\s*', value, maxsplit=1)
except ValueError:
raise argparse.ArgumentTypeError(
f'could not parse argument \"{values[0]}\" as k=v format'
)
d[k] = parse_arg(v)
setattr(args, self.dest, d)
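# Usage sketch (hedged): how an action of this kind can be wired into argparse; the
# flag name and values below are illustrative, not jina's public CLI.
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--env', action=KVAppendAction, metavar='KEY=VALUE', nargs='*')
#   args = parser.parse_args(['--env', 'FOO=1', 'BAR=hello'])
#   # args.env ends up as a dict, e.g. {'FOO': 1, 'BAR': 'hello'} (values go through parse_arg)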
class _ColoredHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help.strip():
return ''
# add the heading if the section was non-empty
if self.heading is not argparse.SUPPRESS and self.heading is not None:
from ..helper import colored
current_indent = self.formatter._current_indent
                capital_heading = ' '.join(
v[0].upper() + v[1:] for v in self.heading.split(' ')
)
heading = '%*s%s\n' % (
current_indent,
'',
                    colored(f'▮ {capital_heading}', 'cyan', attrs=['bold']),
)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def _get_help_string(self, action):
help_string = ''
if (
'%(default)' not in action.help
and action.default is not argparse.SUPPRESS
):
from ..helper import colored
defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
if isinstance(action, argparse._StoreTrueAction):
help_string = colored(
'default: %s'
% (
'enabled'
if action.default
else f'disabled, use "{action.option_strings[0]}" to enable it'
),
attrs=['dark'],
)
elif action.choices:
choices_str = f'{{{", ".join([str(c) for c in action.choices])}}}'
help_string = colored(
'choose from: ' + choices_str + '; default: %(default)s',
attrs=['dark'],
)
elif action.option_strings or action.nargs in defaulting_nargs:
help_string = colored(
'type: %(type)s; default: %(default)s', attrs=['dark']
)
return f'''
{help_string}
{action.help}
'''
def _join_parts(self, part_strings):
return '\n' + ''.join(
[part for part in part_strings if part and part is not argparse.SUPPRESS]
)
def _get_default_metavar_for_optional(self, action):
return ''
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is argparse.SUPPRESS:
del params[name]
for name in list(params):
if hasattr(params[name], '__name__'):
params[name] = params[name].__name__
return self._get_help_string(action) % params
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
if len(action.choices) > 4:
choice_strs = ', '.join([str(c) for c in action.choices][:4])
result = f'{{{choice_strs} ... {len(action.choices) - 4} more choices}}'
else:
choice_strs = ', '.join([str(c) for c in action.choices])
result = f'{{{choice_strs}}}'
else:
result = default_metavar
def formatter(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result,) * tuple_size
return formatter
def _split_lines(self, text, width):
return self._para_reformat(text, width)
def _fill_text(self, text, width, indent):
lines = self._para_reformat(text, width)
return '\n'.join(lines)
def _indents(self, line) -> Tuple[int, int]:
"""Return line indent level and "sub_indent" for bullet list text.
:param line: the line to check
:return: indentation of line and indentation of sub-items
"""
import re
indent = len(re.match(r'( *)', line).group(1))
list_match = re.match(r'( *)(([*\-+>]+|\w+\)|\w+\.) +)', line)
sub_indent = indent + len(list_match.group(2)) if list_match else indent
return (indent, sub_indent)
def _split_paragraphs(self, text):
"""Split text into paragraphs of like-indented lines.
:param text: the text input
:return: list of paragraphs
"""
import textwrap, re
text = textwrap.dedent(text).strip()
text = re.sub('\n\n[\n]+', '\n\n', text)
last_sub_indent = None
paragraphs = []
for line in text.splitlines():
(indent, sub_indent) = self._indents(line)
is_text = len(line.strip()) > 0
if is_text and indent == sub_indent == last_sub_indent:
paragraphs[-1] += ' ' + line
else:
paragraphs.append(line)
last_sub_indent = sub_indent if is_text else None
return paragraphs
def _para_reformat(self, text, width):
"""Format text, by paragraph.
:param text: the text to format
:param width: the width to apply
:return: the new text
"""
import textwrap
lines = []
for paragraph in self._split_paragraphs(text):
(indent, sub_indent) = self._indents(paragraph)
paragraph = self._whitespace_matcher.sub(' ', paragraph).strip()
new_lines = textwrap.wrap(
text=paragraph,
width=width,
initial_indent=' ' * indent,
subsequent_indent=' ' * sub_indent,
)
# Blank lines get eaten by textwrap, put it back
lines.extend(new_lines or [''])
return lines
_chf = _ColoredHelpFormatter
| 32.869231
| 97
| 0.547508
|
a577f395b4963e94d1d3039364bd52b32f9f2c38
| 1,203
|
py
|
Python
|
biblioteca/urls.py
|
gabriel-roque/django-study
|
f14b918c964e4399a16b2ed80fdf8816a13000dd
|
[
"MIT"
] | null | null | null |
biblioteca/urls.py
|
gabriel-roque/django-study
|
f14b918c964e4399a16b2ed80fdf8816a13000dd
|
[
"MIT"
] | null | null | null |
biblioteca/urls.py
|
gabriel-roque/django-study
|
f14b918c964e4399a16b2ed80fdf8816a13000dd
|
[
"MIT"
] | null | null | null |
"""biblioteca URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# DEVELOPMENT PERIOD
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from .views import *
from clientes import urls as clientes_urls
urlpatterns = [
path('', include(clientes_urls)),
path('buscar/<nome>', buscarPessoa),
path('hello/', hello),
path('nome/<nome>', rota),
path('artigos/<int:year>', artigos),
path('admin/', admin.site.urls),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) # DEVELOPMENT PERIOD
| 35.382353
| 94
| 0.718204
|
0ff3a6b6a5cff5928db63691086b9c396d80e29a
| 2,090
|
py
|
Python
|
scripts/manage_tool_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 2
|
2017-10-23T14:44:12.000Z
|
2018-01-14T10:37:28.000Z
|
scripts/manage_tool_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 30
|
2016-10-20T15:35:12.000Z
|
2018-10-02T15:59:54.000Z
|
scripts/manage_tool_dependencies.py
|
innovate-invent/galaxy
|
10aa953a40e171246bdd1804c74e8019da8e8200
|
[
"CC-BY-3.0"
] | 7
|
2016-11-03T19:11:01.000Z
|
2020-05-11T14:23:52.000Z
|
import os.path
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'lib')))
from galaxy.config import (
configure_logging,
find_path,
find_root,
parse_dependency_options,
)
from galaxy.tools.deps import CachedDependencyManager, DependencyManager, NullDependencyManager
from galaxy.util.script import main_factory
DESCRIPTION = "Script to manage tool dependencies (with focus on a Conda environments)."
def _init_if_needed(args, kwargs):
    # If conda_auto_init is set, simply building the Conda resolver will handle installation.
_build_dependency_manager_no_config(kwargs)
def _build_dependency_manager_no_config(kwargs):
"""Simplified variant of build_dependency_manager from galaxy.tools.deps.
The canonical factory method requires a full Galaxy configuration object
which we do not have available in this script (an optimization).
"""
configure_logging(kwargs)
root = find_root(kwargs)
dependency_resolvers_config_file = find_path(kwargs, "dependency_resolvers_config_file", root)
use_dependencies, tool_dependency_dir, use_cached_dependency_manager, tool_dependency_cache_dir, precache_dependencies = \
parse_dependency_options(kwargs, root, dependency_resolvers_config_file)
if not use_dependencies:
dependency_manager = NullDependencyManager()
else:
dependency_manager_kwds = {
'default_base_path': tool_dependency_dir,
'conf_file': dependency_resolvers_config_file,
'app_config': kwargs,
}
if use_cached_dependency_manager:
dependency_manager_kwds['tool_dependency_cache_dir'] = tool_dependency_cache_dir
dependency_manager = CachedDependencyManager(**dependency_manager_kwds)
else:
dependency_manager = DependencyManager(**dependency_manager_kwds)
return dependency_manager
ACTIONS = {
"init_if_needed": _init_if_needed,
}
if __name__ == '__main__':
main = main_factory(description=DESCRIPTION, actions=ACTIONS)
main()
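# Usage sketch (hedged): the CLI is assembled by galaxy.util.script.main_factory, so the
# exact flags are an assumption here; a typical call names the action and a Galaxy config:
#   python scripts/manage_tool_dependencies.py init_if_needed -c config/galaxy.yml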
| 34.262295
| 126
| 0.754545
|
36835fa3410d9d8b52eccda868313783b6d55683
| 11,650
|
py
|
Python
|
MetamorphicTests/all_mutants/sales_forecasting_file/245.py
|
anuragbms/Sales-forecasting-with-RNNs
|
22b4639ecbb48381af53326ace94a3538201b586
|
[
"Apache-2.0"
] | null | null | null |
MetamorphicTests/all_mutants/sales_forecasting_file/245.py
|
anuragbms/Sales-forecasting-with-RNNs
|
22b4639ecbb48381af53326ace94a3538201b586
|
[
"Apache-2.0"
] | null | null | null |
MetamorphicTests/all_mutants/sales_forecasting_file/245.py
|
anuragbms/Sales-forecasting-with-RNNs
|
22b4639ecbb48381af53326ace94a3538201b586
|
[
"Apache-2.0"
] | 1
|
2022-02-06T14:59:43.000Z
|
2022-02-06T14:59:43.000Z
|
def gen_mutants():
import tensorflow as tf
import pandas
import numpy as np
DATAFILE_TRAIN = 'mock_kaggle_edit_train.csv'
DATAFILE_VALIDATE = 'mock_kaggle_edit_validate.csv'
TRAINED_MODEL_PATH = 'savedModel'
TIME_STEPS = 10
NUMBER_OF_DAYS_TO_FORECAST = 1
BATCH_SIZE = 100
NUM_EPOCHS = 100
LSTM_UNITS = 250
TENSORBOARD_LOGDIR = 'tensorboard_log'
data_train = pandas.read_csv(DATAFILE_TRAIN)
data_validate = pandas.read_csv(DATAFILE_VALIDATE)
data_train.head()
numTrainingData = len(data_train)
numValidationData = len(data_validate)
trainingData_date = data_train['date'][0:numTrainingData]
trainingData_sales = data_train['sales'][0:numTrainingData]
trainindData_price = data_train['price'][0:numTrainingData]
validationData_date = data_validate['date'][0:numValidationData]
validationData_sales = data_validate['sales'][0:numValidationData]
validationData_price = data_validate['price'][0:numValidationData]
trainingData_sales.head()
print(len(trainingData_sales))
print(len(validationData_sales))
trainingData_sales_min = min(trainingData_sales)
trainingData_sales_max = max(trainingData_sales)
trainingData_sales_range = trainingData_sales_max - trainingData_sales_min
trainingData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in trainingData_sales]
validationData_sales_normalised = [(i - trainingData_sales_min) / trainingData_sales_range for i in validationData_sales]
print('Min:', trainingData_sales_min)
print('Range:', trainingData_sales_max - trainingData_sales_min)
trainingDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(trainingData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
trainingDataSequence_sales[start,:,0] = trainingData_sales_normalised[start:i]
targetDataSequence_sales[start] = trainingData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start = start + 1
[trainingDataSequence_sales[i,:,0] for i in range(3)]
[targetDataSequence_sales[i] for i in range(3)]
a = np.arange(len(targetDataSequence_sales))
np.random.shuffle(a)
trainingDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
targetDataSequence_sales_shuffle = np.zeros(shape=(((len(trainingData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
loc = 0
for i in a:
trainingDataSequence_sales_shuffle[loc] = trainingDataSequence_sales[i]
targetDataSequence_sales_shuffle[loc] = targetDataSequence_sales[i]
loc += 1
trainingDataSequence_sales = trainingDataSequence_sales_shuffle
targetDataSequence_sales = targetDataSequence_sales_shuffle
validationDataSequence_sales = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, TIME_STEPS, 1))
validationDataSequence_sales_target = np.zeros(shape=(((len(validationData_sales) - TIME_STEPS) - NUMBER_OF_DAYS_TO_FORECAST) + 1, NUMBER_OF_DAYS_TO_FORECAST))
start = 0
for i in range(TIME_STEPS, (len(validationData_sales) - NUMBER_OF_DAYS_TO_FORECAST) + 1):
validationDataSequence_sales[start,:,0] = validationData_sales_normalised[start:i]
validationDataSequence_sales_target[start] = validationData_sales_normalised[i:i + NUMBER_OF_DAYS_TO_FORECAST]
start += 1
tf.reset_default_graph()
inputSequencePlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, TIME_STEPS, 1), name='inputSequencePlaceholder')
targetPlaceholder = tf.placeholder(dtype=tf.float32, shape=(None, NUMBER_OF_DAYS_TO_FORECAST), name='targetPlaceholder')
cell = tf.nn.rnn_cell.LSTMCell(num_units=LSTM_UNITS, name='LSTM_cell')
(output, state) = tf.nn.dynamic_rnn(cell=cell, inputs=inputSequencePlaceholder, dtype=tf.float32)
lastCellOutput = output[:,-1,:]
print('output:', output)
print('state:', state)
print('lastCellOutput:', lastCellOutput)
weights = tf.Variable(initial_value=tf.truncated_normal(shape=(LSTM_UNITS, NUMBER_OF_DAYS_TO_FORECAST)))
bias = tf.Variable(initial_value=tf.ones(shape=NUMBER_OF_DAYS_TO_FORECAST))
forecast = tf.add(x=tf.matmul(a=lastCellOutput, b=weights), y=bias, name='forecast_normalised_scale')
forecast_originalScale = tf.add(x=forecast * trainingData_sales_range, y=trainingData_sales_min, name='forecast_original_scale')
print(forecast)
print(forecast_originalScale)
loss = tf.reduce_mean(tf.squared_difference(x=forecast, y=targetPlaceholder), name='loss_comp')
tf.summary.scalar(tensor=loss, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
minimize_step = optimizer.minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
tensorboard_writer = tf.summary.FileWriter(TENSORBOARD_LOGDIR, sess.graph)
all_summary_ops = tf.summary.merge_all()
numSteps = 0
for e in range(NUM_EPOCHS):
print('starting training for epoch:', e + 1)
startLocation = 0
iteration = 0
for iteration in range(int(len(targetDataSequence_sales) / BATCH_SIZE)):
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:startLocation + BATCH_SIZE]
(_, lsBatch, forecastBatch, forecastBatch_originalScale, summary_values) = sess.run([minimize_step, loss, forecast, forecast_originalScale, all_summary_ops], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
tensorboard_writer.add_summary(summary_values, numSteps)
numSteps += 1
if (iteration + 1) % 1 == 0:
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
                    print('the forecast of first 5 original scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
startLocation += BATCH_SIZE
if len(targetDataSequence_sales) > startLocation:
print('epoch:', e + 1, ' iteration:', iteration + 1)
trainingBatchInput = trainingDataSequence_sales[startLocation:len(targetDataSequence_sales),:,:]
trainingBatchTarget = targetDataSequence_sales[startLocation:len(targetDataSequence_sales)]
(_, lsBatch, forecastBatch, forecastBatch_originalScale) = sess.run([minimize_step, loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: trainingBatchInput, \
targetPlaceholder: trainingBatchTarget})
print('got a loss of:', lsBatch)
print('the forecast of first 5 normalised are:', forecastBatch[0:5])
print('while the actuals were normalised :', trainingBatchTarget[0:5])
                print('the forecast of first 5 original scale are:', forecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (trainingBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
totalValidationLoss = 0
startLocation = 0
print('starting validation')
for iter in range(len(validationDataSequence_sales) // BATCH_SIZE):
validationBatchInput = validationDataSequence_sales[startLocation:startLocation + BATCH_SIZE,:,:]
validationBatchTarget = validationDataSequence_sales_target[startLocation:startLocation + BATCH_SIZE]
(validationLsBatch, validationForecastBatch, validationForecastBatch_originalScale) = sess.run([loss, forecast, forecast_originalScale], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
startLocation += BATCH_SIZE
totalValidationLoss += validationLsBatch
print('first five predictions:', validationForecastBatch[0:5])
print('first five actuals :', validationBatchTarget[0:5])
                print('the forecast of first 5 original scale are:', validationForecastBatch_originalScale[0:5])
print('while the actuals were original scale :', (validationBatchTarget[0:5] * trainingData_sales_range) + trainingData_sales_min)
if startLocation < len(validationDataSequence_sales):
validationBatchInput = validationDataSequence_sales[startLocation:len(validationDataSequence_sales)]
validationBatchTarget = validationDataSequence_sales_target[startLocation:len(validationDataSequence_sales)]
(validationLsBatch, validationForecastBatch) = sess.run([loss, forecast], feed_dict={inputSequencePlaceholder: validationBatchInput, \
targetPlaceholder: validationBatchTarget})
totalValidationLoss += validationLsBatch
print('Validation completed after epoch:', e + 1, '. Total validation loss:', totalValidationLoss)
print('----------- Saving Model')
tf.saved_model.simple_save(sess, export_dir=TRAINED_MODEL_PATH, inputs=\
{'inputSequencePlaceholder': inputSequencePlaceholder, 'targetPlaceholder': targetPlaceholder}, outputs=\
{'mutpy': loss, 'forecast_originalScale': forecast_originalScale})
print('saved model to:', TRAINED_MODEL_PATH)
print('----------- Finis')
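# Usage sketch (hedged): the mutant body only runs when called explicitly; it expects the
# two CSV files named above and a TensorFlow 1.x environment.
#   gen_mutants()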
| 31.233244
| 232
| 0.6297
|
216e02a95b95ac068e37a8d44cbb0f32bf14a933
| 9,242
|
py
|
Python
|
tests/components/stream/test_recorder.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | null | null | null |
tests/components/stream/test_recorder.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | 43
|
2021-04-21T08:08:13.000Z
|
2022-03-31T06:09:50.000Z
|
tests/components/stream/test_recorder.py
|
mikan-megane/core
|
837220cce40890e296920d33a623adbc11bd15a6
|
[
"Apache-2.0"
] | null | null | null |
"""The tests for hls streams."""
from __future__ import annotations
import asyncio
from collections import deque
from datetime import timedelta
from io import BytesIO
import logging
import os
import threading
from unittest.mock import patch
import async_timeout
import av
import pytest
from homeassistant.components.stream import create_stream
from homeassistant.components.stream.const import HLS_PROVIDER, RECORDER_PROVIDER
from homeassistant.components.stream.core import Segment
from homeassistant.components.stream.fmp4utils import get_init_and_moof_data
from homeassistant.components.stream.recorder import recorder_save_worker
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.stream.common import generate_h264_video
TEST_TIMEOUT = 7.0 # Lower than 9s home assistant timeout
MAX_ABORT_SEGMENTS = 20 # Abort test to avoid looping forever
class SaveRecordWorkerSync:
"""
Test fixture to manage RecordOutput thread for recorder_save_worker.
This is used to assert that the worker is started and stopped cleanly
to avoid thread leaks in tests.
"""
def __init__(self):
"""Initialize SaveRecordWorkerSync."""
self.reset()
self._segments = None
self._save_thread = None
def recorder_save_worker(self, file_out: str, segments: deque[Segment]):
"""Mock method for patch."""
logging.debug("recorder_save_worker thread started")
assert self._save_thread is None
self._segments = segments
self._save_thread = threading.current_thread()
self._save_event.set()
async def get_segments(self):
"""Return the recorded video segments."""
with async_timeout.timeout(TEST_TIMEOUT):
await self._save_event.wait()
return self._segments
async def join(self):
"""Verify save worker was invoked and block on shutdown."""
with async_timeout.timeout(TEST_TIMEOUT):
await self._save_event.wait()
self._save_thread.join(timeout=TEST_TIMEOUT)
assert not self._save_thread.is_alive()
def reset(self):
"""Reset callback state for reuse in tests."""
self._save_thread = None
self._save_event = asyncio.Event()
@pytest.fixture()
def record_worker_sync(hass):
"""Patch recorder_save_worker for clean thread shutdown for test."""
sync = SaveRecordWorkerSync()
with patch(
"homeassistant.components.stream.recorder.recorder_save_worker",
side_effect=sync.recorder_save_worker,
autospec=True,
):
yield sync
async def test_record_stream(hass, hass_client, record_worker_sync):
"""
Test record stream.
Tests full integration with the stream component, and captures the
stream worker and save worker to allow for clean shutdown of background
threads. The actual save logic is tested in test_recorder_save below.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo track
source = generate_h264_video()
stream = create_stream(hass, source)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
# After stream decoding finishes, the record worker thread starts
segments = await record_worker_sync.get_segments()
assert len(segments) >= 1
# Verify that the save worker was invoked, then block until its
# thread completes and is shutdown completely to avoid thread leaks.
await record_worker_sync.join()
stream.stop()
async def test_record_lookback(
hass, hass_client, stream_worker_sync, record_worker_sync
):
"""Exercise record with loopback."""
await async_setup_component(hass, "stream", {"stream": {}})
source = generate_h264_video()
stream = create_stream(hass, source)
# Start an HLS feed to enable lookback
stream.add_provider(HLS_PROVIDER)
stream.start()
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path", lookback=4)
# This test does not need recorder cleanup since it is not fully exercised
stream.stop()
async def test_recorder_timeout(hass, hass_client, stream_worker_sync):
"""
Test recorder timeout.
Mocks out the cleanup to assert that it is invoked after a timeout.
This test does not start the recorder save thread.
"""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
with patch("homeassistant.components.stream.IdleTimer.fire") as mock_timeout:
# Setup demo track
source = generate_h264_video()
stream = create_stream(hass, source)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
recorder = stream.add_provider(RECORDER_PROVIDER)
await recorder.recv()
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert mock_timeout.called
stream_worker_sync.resume()
stream.stop()
await hass.async_block_till_done()
await hass.async_block_till_done()
async def test_record_path_not_allowed(hass, hass_client):
"""Test where the output path is not allowed by home assistant configuration."""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo track
source = generate_h264_video()
stream = create_stream(hass, source)
with patch.object(
hass.config, "is_allowed_path", return_value=False
), pytest.raises(HomeAssistantError):
await stream.async_record("/example/path")
async def test_recorder_save(tmpdir):
"""Test recorder save."""
# Setup
source = generate_h264_video()
filename = f"{tmpdir}/test.mp4"
# Run
recorder_save_worker(
filename, [Segment(1, *get_init_and_moof_data(source.getbuffer()), 4)]
)
# Assert
assert os.path.exists(filename)
async def test_recorder_discontinuity(tmpdir):
"""Test recorder save across a discontinuity."""
# Setup
source = generate_h264_video()
filename = f"{tmpdir}/test.mp4"
# Run
init, moof_data = get_init_and_moof_data(source.getbuffer())
recorder_save_worker(
filename,
[
Segment(1, init, moof_data, 4, 0),
Segment(2, init, moof_data, 4, 1),
],
)
# Assert
assert os.path.exists(filename)
async def test_recorder_no_segments(tmpdir):
"""Test recorder behavior with a stream failure which causes no segments."""
# Setup
filename = f"{tmpdir}/test.mp4"
# Run
recorder_save_worker("unused-file", [])
# Assert
assert not os.path.exists(filename)
async def test_record_stream_audio(
hass, hass_client, stream_worker_sync, record_worker_sync
):
"""
Test treatment of different audio inputs.
Record stream output should have an audio channel when input has
a valid codec and audio packets and no audio channel otherwise.
"""
await async_setup_component(hass, "stream", {"stream": {}})
for a_codec, expected_audio_streams in (
("aac", 1), # aac is a valid mp4 codec
("pcm_mulaw", 0), # G.711 is not a valid mp4 codec
("empty", 0), # audio stream with no packets
(None, 0), # no audio stream
):
record_worker_sync.reset()
stream_worker_sync.pause()
# Setup demo track
source = generate_h264_video(
container_format="mov", audio_codec=a_codec
) # mov can store PCM
stream = create_stream(hass, source)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
recorder = stream.add_provider(RECORDER_PROVIDER)
while True:
segment = await recorder.recv()
if not segment:
break
last_segment = segment
stream_worker_sync.resume()
result = av.open(
BytesIO(last_segment.init + last_segment.moof_data), "r", format="mp4"
)
assert len(result.streams.audio) == expected_audio_streams
result.close()
stream.stop()
await hass.async_block_till_done()
# Verify that the save worker was invoked, then block until its
# thread completes and is shutdown completely to avoid thread leaks.
await record_worker_sync.join()
async def test_recorder_log(hass, caplog):
"""Test starting a stream to record logs the url without username and password."""
await async_setup_component(hass, "stream", {"stream": {}})
stream = create_stream(hass, "https://abcd:efgh@foo.bar")
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
assert "https://abcd:efgh@foo.bar" not in caplog.text
assert "https://****:****@foo.bar" in caplog.text
| 32.202091
| 86
| 0.691301
|
f2c252ed627d49e8daaf55e83108367d0057328a
| 547
|
py
|
Python
|
ACME/topology/poly2lin.py
|
mauriziokovacic/ACME
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 3
|
2019-10-23T23:10:55.000Z
|
2021-09-01T07:30:14.000Z
|
ACME/topology/poly2lin.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | null | null | null |
ACME/topology/poly2lin.py
|
mauriziokovacic/ACME-Python
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
[
"MIT"
] | 1
|
2020-07-11T11:35:43.000Z
|
2020-07-11T11:35:43.000Z
|
from ..utility.row import *
from ..utility.col import *
from ..utility.flatten import *
from ..utility.indices import *
from ..utility.repmat import *
def poly2lin(T):
"""
Returns the indices of the topology nodes, along with their
respective polygon indices.
Parameters
----------
T : LongTensor
the topology tensor
Returns
-------
(LongTensor,LongTensor)
the nodes indices and the respective polygon indices
"""
return flatten(T), repmat(indices(0, col(T)-1), row(T), 1)
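# Usage sketch (hedged): `T` is any LongTensor topology, e.g. an (n, 3) tensor of triangle
# vertex indices; the exact ordering of the returned pair depends on the ACME
# flatten/repmat/indices conventions imported above.
#   nodes, polys = poly2lin(T)   # nodes: flattened vertex indices, polys: matching polygon ids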
| 21.88
| 63
| 0.628885
|
514f8a2f0a4a33fbd1427cfa46702c0e44f75ee0
| 5,823
|
py
|
Python
|
docs/markdown_generator.py
|
kerenleibovich/mlapp
|
0b8dfaba7a7070ab68cb29ff61dd1c7dd8076693
|
[
"Apache-2.0"
] | 33
|
2021-02-26T10:41:09.000Z
|
2021-11-07T12:35:32.000Z
|
docs/markdown_generator.py
|
kerenleibovich/mlapp
|
0b8dfaba7a7070ab68cb29ff61dd1c7dd8076693
|
[
"Apache-2.0"
] | 17
|
2021-03-04T15:37:21.000Z
|
2021-04-06T12:00:13.000Z
|
docs/markdown_generator.py
|
kerenleibovich/mlapp
|
0b8dfaba7a7070ab68cb29ff61dd1c7dd8076693
|
[
"Apache-2.0"
] | 9
|
2021-03-03T20:02:41.000Z
|
2021-10-05T13:03:56.000Z
|
import os
import re
# This script goes over the modules listed in `libraries` and generates an md file with all docstrings for each of the
# modules using the third-party library pydoc-markdown.
# It then does some editing on each file so it shows nicely in GitHub's wiki.
# Note that pydoc-markdown only lists the outermost definitions, so docstrings from functions nested under a class will
# NOT be documented in the markdown.
#
# this requires the following setup:
# pip install pydoc-markdown mkdocs mkdocs-material
# pip install pygments (required for codehilite)
#
# Docstrings should be in reStructuredText format (PyCharm's default).
# This script expects code examples within the docstring to be written between @@@ ... @@@@ tags
# (i.e. @@@word@@@@ => ```word```).
# ------------------------------------ Generate markdown -----------------------------------
docs_folder = './'
libraries = [
# api
# {
# 'src': 'mlapp.utils.automl ',
# 'dest': 'api/utils.automl.md'
# },
# {
# 'src': 'mlapp.utils.features.pandas ',
# 'dest': 'api/utils.features.pandas.md'
# },
# {
# 'src': 'mlapp.utils.features.spark ',
# 'dest': 'api/utils.features.spark.md'
# },
# {
# 'src': 'mlapp.utils.metrics.pandas ',
# 'dest': 'api/utils.metrics.pandas.md'
# },
# {
# 'src': 'mlapp.utils.metrics.spark ',
# 'dest': 'api/utils.metrics.spark.md'
# },
# managers
# {
# 'src': 'mlapp.managers.user_managers',
# 'dest': 'api/managers.md',
# },
# # handlers
# {
# 'src': 'mlapp.handlers.databases.database_interface',
# 'dest': 'api/handlers.database.md',
# 'exclude': ['update_job_running']
# },
# {
# 'src': 'mlapp.handlers.file_storages.file_storage_interface',
# 'dest': 'api/handlers.file_storage.md'
# },
# {
# 'src': 'mlapp.handlers.message_queues.message_queue_interface',
# 'dest': 'api/handlers.message_queue.md'
# },
# {
# 'src': 'mlapp.handlers.spark.spark_interface',
# 'dest': 'api/handlers.spark.md'
# }
]
def create_documentation(file_name, create_main_table_of_contents=False, mkdocs_format=False, exclusion_dict=None):
if not exclusion_dict:
exclusion_dict = {}
with open(file_name, 'r') as f:
line = True
temp = ''
module_exclude_flag = False
function_exclude_flag = False
param_section = False
output = ''
while line:
line = f.readline()
# exclude functions from the docs:
if function_exclude_flag:
if line.startswith('#') and line[3:-1] not in list(exclusion_dict.values())[0]:
function_exclude_flag = False
else:
continue
elif module_exclude_flag:
if (line.startswith('# ')) and (line[:-1] not in list(exclusion_dict.keys())):
module_exclude_flag = False # made it to a new module in the file. turn flag off.
elif line[3:-1] in list(exclusion_dict.values())[0]: # this function needs to be excluded
function_exclude_flag = True
continue
elif exclusion_dict: # there are functions to exclude in one of these files.
if line[2:-1] in list(exclusion_dict.keys()): # this module has functions to exclude!
module_exclude_flag = True
            # if line starts with a lot of spaces, turn them into   to preserve indentation:
if re.search(r'[^\s]', line):
if re.search(r'[^\s]', line).start() > 5:
numSpaces = re.search(r'[^\s]', line).start()
line = ' '*(numSpaces-3) + line[numSpaces:]
# turn the param section into a beautiful html table:
if re.search(r'(:{1}\w*)\s*(\**\w*:{1})', line): # match ":param: x"
if not re.search(r'^:return:', line):
temp += line
param_section = True
else:
temp = re.sub(r'(:{1}\w*)\s*(\**\w*:{1})', r'<br/><b><i>\2</b></i>', temp)
temp = temp[5:] # remove the leading <br/> I added..
temp2 = re.sub(r'(:return:)', r'', line)
line = '<table style="width:100%"><tr><td valign="top"><b><i>Parameters:</b></i></td>' \
'<td valign="top">'+temp+'</td></tr><tr><td valign="top"><b><i>Returns:</b></i>' \
'</td><td valign="top">'+temp2+'</td></tr></table>'
output = output + '\n<br/>' + line + '\n<br/>\n'
temp = ''
param_section = False
else:
if param_section:
temp += line
else:
output = output + line
# convert examples to valid markdown.
if mkdocs_format:
output = re.sub('@{4}', '', output)
output = re.sub('@{3}', '\n!!! note "Reference:"', output)
else:
output = re.sub('@{3,4}', '\n ```', output)
return output
def run(mkdocs=False):
# os.chdir('..')
for lib in libraries:
script = lib['src']
md = docs_folder + lib['dest']
command = 'pydoc-markdown -m' + ''.join(script) + ' > ' + md
os.system(command)
exclusion_dict = {lib['src'][:-2]: lib['exclude']} if 'exclude' in lib else {}
output = create_documentation(md, False, mkdocs, exclusion_dict)
# Save changes to file
with open(md, 'w') as f:
f.write(output)
if __name__ == '__main__':
run(mkdocs=False)
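# Usage sketch (hedged): populate `libraries` with entries shaped like the commented
# examples above ({'src': ..., 'dest': ..., optional 'exclude': [...]}) and run:
#   python docs/markdown_generator.py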
| 38.058824
| 120
| 0.522926
|
de5b4b4330761625b6f156b70087388573ec593f
| 667
|
py
|
Python
|
client/pylookyloo/api.py
|
robertdigital/lookyloo
|
c9cbf27accac519e7060edc1feeba364c0b8729a
|
[
"BSD-3-Clause"
] | 1
|
2020-02-05T16:50:17.000Z
|
2020-02-05T16:50:17.000Z
|
client/pylookyloo/api.py
|
robertdigital/lookyloo
|
c9cbf27accac519e7060edc1feeba364c0b8729a
|
[
"BSD-3-Clause"
] | null | null | null |
client/pylookyloo/api.py
|
robertdigital/lookyloo
|
c9cbf27accac519e7060edc1feeba364c0b8729a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import json
from urllib.parse import urljoin
class Lookyloo():
def __init__(self, root_url: str='https://lookyloo.circl.lu/'):
self.root_url = root_url
if not self.root_url.endswith('/'):
self.root_url += '/'
self.session = requests.session()
@property
def is_up(self):
r = self.session.head(self.root_url)
return r.status_code == 200
def enqueue(self, url: str):
response = self.session.post(urljoin(self.root_url, 'submit'), data=json.dumps({'url': url}))
return urljoin(self.root_url, f'tree/{response.text}')
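# Usage sketch (hedged): the default instance URL is the public one baked in above; the
# submitted URL is a placeholder.
#   lookyloo = Lookyloo()
#   if lookyloo.is_up:
#       print(lookyloo.enqueue('http://example.com'))  # permaurl under <root_url>/tree/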
| 25.653846
| 101
| 0.626687
|
0d7557e2da4dedcf3431e93544e46bd147f0d844
| 34
|
py
|
Python
|
pylinenotify/__init__.py
|
reud/PyLineNotify
|
da0ab2591f9087919ca549412756ddbbcb607983
|
[
"MIT"
] | 3
|
2019-03-09T08:28:41.000Z
|
2020-02-24T00:51:05.000Z
|
pylinenotify/__init__.py
|
reud/PyLineNotify
|
da0ab2591f9087919ca549412756ddbbcb607983
|
[
"MIT"
] | 3
|
2019-02-05T03:54:19.000Z
|
2020-03-31T01:37:29.000Z
|
pylinenotify/__init__.py
|
reud/PyLineNotify
|
da0ab2591f9087919ca549412756ddbbcb607983
|
[
"MIT"
] | 2
|
2020-04-25T16:12:33.000Z
|
2021-06-08T04:40:01.000Z
|
from pylinenotify.notifer import *
| 34
| 34
| 0.852941
|
cc83268cd4b2a730f9818a8eeb293fef290a058b
| 1,702
|
py
|
Python
|
setup.py
|
Hwesta/python-client-sword2
|
db1660d6a48fa1f6c69b97b3ece7916fdafc555c
|
[
"MIT"
] | null | null | null |
setup.py
|
Hwesta/python-client-sword2
|
db1660d6a48fa1f6c69b97b3ece7916fdafc555c
|
[
"MIT"
] | 2
|
2017-02-27T11:44:44.000Z
|
2021-01-28T13:13:13.000Z
|
setup.py
|
Hwesta/python-client-sword2
|
db1660d6a48fa1f6c69b97b3ece7916fdafc555c
|
[
"MIT"
] | 2
|
2016-12-08T14:48:27.000Z
|
2021-01-19T08:56:10.000Z
|
from setuptools import setup, find_packages
import sys, os
version = '0.1'
try:
from mercurial import ui, hg, error
repo = hg.repository(ui.ui(), ".")
ver = repo[version]
except ImportError:
pass
except error.RepoLookupError:
tip = repo["tip"]
version = version + ".%s.%s" % (tip.rev(), tip.hex()[:12])
except error.RepoError:
pass
setup(
name='sword2',
version=version,
description="SWORD v2 python client",
long_description="""\
SWORD v2 python client""",
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Environment :: Web Environment",
#"Framework :: Paste",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.6",
"Topic :: Internet",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords="sword-app atom sword2 http",
author="Ben O'Steen",
author_email='bosteen@gmail.com',
url="http://swordapp.org/",
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
"httplib2",
],
# Following left in as a memory aid for later-
#entry_points="""
# # -*- Entry points: -*-
# [console_scripts]
# cmd=module.path:func_name
#""",
)
| 29.344828
| 77
| 0.610458
|
ed754326aef8abc5b8f48c3870d574d513272228
| 2,072
|
py
|
Python
|
setup/05-negative_testing_rule.py
|
eficode-academy/rf-katas-sl
|
bcb24ffa151db0abd13720b403cdb3af9bc3e743
|
[
"MIT"
] | 10
|
2020-09-28T10:27:39.000Z
|
2022-02-22T08:16:08.000Z
|
setup/05-negative_testing_rule.py
|
eficode-academy/rf-katas-sl
|
bcb24ffa151db0abd13720b403cdb3af9bc3e743
|
[
"MIT"
] | 11
|
2020-10-07T08:46:16.000Z
|
2022-02-16T06:36:53.000Z
|
setup/05-negative_testing_rule.py
|
eficode-academy/rf-katas-sl
|
bcb24ffa151db0abd13720b403cdb3af9bc3e743
|
[
"MIT"
] | 16
|
2020-10-04T11:27:06.000Z
|
2022-02-22T14:45:27.000Z
|
from rflint.common import KeywordRule, SuiteRule, TestRule, ERROR
from static import normalize
MUST_KEYWORDS = [
"Enter Username",
"Enter Password",
"Submit Login Form",
"Verify That Error Page Is Visible"
]
SETUP_KEYWORD = "Open Browser To Login Page"
class KeywordImplementationRule05(KeywordRule):
severity = ERROR
def apply(self, keyword):
report = False
name = normalize(keyword.name)
if name == MUST_KEYWORDS[0] or name == MUST_KEYWORDS[1]:
if len(keyword.settings) == 0 or (not "[Arguments]" in keyword.settings[0]):
report = True
if report:
self.report(keyword, "Did you remember to use keyword arguments?", keyword.linenumber)
class TestCaseImplementation05(TestRule):
severity = ERROR
def apply(self, test):
default_message = "Check that you've implemented test case {} as instructed: ".format(test.name)
default_message += "{} is expected as a setup or part of the test. {} are required as part of the test".format(SETUP_KEYWORD, ", ".join(MUST_KEYWORDS))
test_steps = []
for step in test.steps:
if len(step) > 1:
test_steps.append(normalize(step[1]))
has_failures = False
if len(test_steps) == 5 and test_steps != [SETUP_KEYWORD] + MUST_KEYWORDS:
has_failures = True
elif len(test_steps) == 4:
has_setup = False
setup = list(filter(lambda s: "test setup" in str(s).lower(), test.parent.settings))[0]
if SETUP_KEYWORD in normalize(str(setup)) and not setup.is_comment():
has_setup = True
if not has_setup:
for setting in test.settings:
s = normalize(str(setting))
if SETUP_KEYWORD in s and "[Setup]" in s:
has_setup = True
break
if not has_setup:
has_failures = True
if has_failures:
self.report(test, default_message, test.linenumber)
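# Usage sketch (hedged): rules like these are loaded into rflint through its rule-file
# option; the suite filename is a placeholder and the exact flag spelling is assumed.
#   rflint --rulefile setup/05-negative_testing_rule.py login_tests.robot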
| 37
| 159
| 0.601351
|
aa5f3c963ff4ae8bd9c79be97636e00c31b64ef7
| 5,023
|
py
|
Python
|
mla/neuralnet/nnet.py
|
Gewissta/MLAlgorithms
|
62633dd30230a8bdc13826b37ea51ce39df69fb9
|
[
"MIT"
] | 5
|
2019-09-23T19:52:40.000Z
|
2022-01-07T20:12:49.000Z
|
mla/neuralnet/nnet.py
|
Gewissta/MLAlgorithms
|
62633dd30230a8bdc13826b37ea51ce39df69fb9
|
[
"MIT"
] | null | null | null |
mla/neuralnet/nnet.py
|
Gewissta/MLAlgorithms
|
62633dd30230a8bdc13826b37ea51ce39df69fb9
|
[
"MIT"
] | 1
|
2020-07-20T22:52:57.000Z
|
2020-07-20T22:52:57.000Z
|
import logging
import numpy as np
from autograd import elementwise_grad
from mla.base import BaseEstimator
from mla.metrics.metrics import get_metric
from mla.neuralnet.layers import PhaseMixin
from mla.neuralnet.loss import get_loss
from mla.utils import batch_iterator
np.random.seed(9999)
"""
Architecture inspired from:
https://github.com/fchollet/keras
https://github.com/andersbll/deeppy
"""
class NeuralNet(BaseEstimator):
fit_required = False
def __init__(
self, layers, optimizer, loss, max_epochs=10, batch_size=64, metric="mse", shuffle=False, verbose=True
):
self.verbose = verbose
self.shuffle = shuffle
self.optimizer = optimizer
self.loss = get_loss(loss)
# TODO: fix
if loss == "categorical_crossentropy":
self.loss_grad = lambda actual, predicted: -(actual - predicted)
else:
self.loss_grad = elementwise_grad(self.loss, 1)
self.metric = get_metric(metric)
self.layers = layers
self.batch_size = batch_size
self.max_epochs = max_epochs
self._n_layers = 0
self.log_metric = True if loss != metric else False
self.metric_name = metric
self.bprop_entry = self._find_bprop_entry()
self.training = False
self._initialized = False
def _setup_layers(self, x_shape):
"""Initialize model's layers."""
x_shape = list(x_shape)
x_shape[0] = self.batch_size
for layer in self.layers:
layer.setup(x_shape)
x_shape = layer.shape(x_shape)
self._n_layers = len(self.layers)
# Setup optimizer
self.optimizer.setup(self)
self._initialized = True
logging.info("Total parameters: %s" % self.n_params)
def _find_bprop_entry(self):
"""Find entry layer for back propagation."""
if len(self.layers) > 0 and not hasattr(self.layers[-1], "parameters"):
return -1
return len(self.layers)
def fit(self, X, y=None):
if not self._initialized:
self._setup_layers(X.shape)
if y.ndim == 1:
# Reshape vector to matrix
y = y[:, np.newaxis]
self._setup_input(X, y)
self.is_training = True
# Pass neural network instance to an optimizer
self.optimizer.optimize(self)
self.is_training = False
def update(self, X, y):
# Forward pass
y_pred = self.fprop(X)
# Backward pass
grad = self.loss_grad(y, y_pred)
for layer in reversed(self.layers[: self.bprop_entry]):
grad = layer.backward_pass(grad)
return self.loss(y, y_pred)
def fprop(self, X):
"""Forward propagation."""
for layer in self.layers:
X = layer.forward_pass(X)
return X
def _predict(self, X=None):
if not self._initialized:
self._setup_layers(X.shape)
y = []
X_batch = batch_iterator(X, self.batch_size)
for Xb in X_batch:
y.append(self.fprop(Xb))
return np.concatenate(y)
@property
def parametric_layers(self):
for layer in self.layers:
if hasattr(layer, "parameters"):
yield layer
@property
def parameters(self):
"""Returns a list of all parameters."""
params = []
for layer in self.parametric_layers:
params.append(layer.parameters)
return params
def error(self, X=None, y=None):
"""Calculate an error for given examples."""
training_phase = self.is_training
if training_phase:
# Temporally disable training.
# Some layers work differently while training (e.g. Dropout).
self.is_training = False
if X is None and y is None:
y_pred = self._predict(self.X)
score = self.metric(self.y, y_pred)
else:
y_pred = self._predict(X)
score = self.metric(y, y_pred)
if training_phase:
self.is_training = True
return score
@property
def is_training(self):
return self.training
@is_training.setter
def is_training(self, train):
self.training = train
for layer in self.layers:
if isinstance(layer, PhaseMixin):
layer.is_training = train
def shuffle_dataset(self):
"""Shuffle rows in the dataset."""
n_samples = self.X.shape[0]
indices = np.arange(n_samples)
np.random.shuffle(indices)
self.X = self.X.take(indices, axis=0)
self.y = self.y.take(indices, axis=0)
@property
def n_layers(self):
"""Returns the number of layers."""
return self._n_layers
@property
def n_params(self):
"""Return the number of trainable parameters."""
return sum([layer.parameters.n_params for layer in self.parametric_layers])
def reset(self):
self._initialized = False
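# Usage sketch (hedged): layer and optimizer classes are assumed to live in
# mla.neuralnet.layers / mla.neuralnet.optimizers as in the upstream MLAlgorithms
# examples; the names and sizes below are illustrative only.
#   model = NeuralNet(
#       layers=[Dense(64), Activation('relu'), Dense(1)],
#       optimizer=Adam(), loss='mse', metric='mse', batch_size=64, max_epochs=25,
#   )
#   model.fit(X_train, y_train)
#   predictions = model.predict(X_test)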
| 29.034682
| 110
| 0.606211
|
6b419cbcc7d12505763aeb8e89943fb2866586c4
| 12,090
|
py
|
Python
|
geomodelgrids/scripts/generate_points.py
|
baagaard-usgs/geomodelgrids
|
9b3b6879038691cf686a79d95b1c33092f154258
|
[
"RSA-MD"
] | 2
|
2020-09-01T23:09:02.000Z
|
2020-09-10T02:37:02.000Z
|
geomodelgrids/scripts/generate_points.py
|
baagaard-usgs/geomodelgrids
|
9b3b6879038691cf686a79d95b1c33092f154258
|
[
"RSA-MD"
] | 88
|
2019-10-04T18:19:19.000Z
|
2021-10-20T01:54:08.000Z
|
geomodelgrids/scripts/generate_points.py
|
baagaard-usgs/geomodelgrids
|
9b3b6879038691cf686a79d95b1c33092f154258
|
[
"RSA-MD"
] | 1
|
2019-10-18T17:17:31.000Z
|
2019-10-18T17:17:31.000Z
|
#!/usr/bin/env python
# ======================================================================
#
# Brad T. Aagaard
# U.S. Geological Survey
#
# ======================================================================
#
import math
import logging
import numpy
import os
class Block(object):
"""Uniform resolution gridded block within a model.
"""
def __init__(self, name, res_horiz, res_vert, z_top, z_bot, z_top_offset, domain_x, domain_y, domain_z):
"""Constructor.
:param name: Name of block.
:param res_horiz: Horizontal resolution of block in meters.
:param res_vert: Vertical resolution of block in meters.
        :param z_top: Elevation of top of block in meters.
:param z_bot: Elevation of bottom of block in meters.
:param z_top_offset: Amount to offset top elevation of block in meters.
"""
self.name = name
self.res_horiz = res_horiz
self.res_vert = res_vert
self.z_top = z_top
self.z_bot = z_bot
self.z_top_offset = z_top_offset
self.domain_z = domain_z
self.num_x = 1 + int(domain_x / self.res_horiz)
self.num_y = 1 + int(domain_y / self.res_horiz)
self.num_z = 1 + int((self.z_top-self.z_bot) / self.res_vert)
return
def points(self, y_azimuth, origin_x, origin_y, topography):
"""Create array of points spanning the block.
:return: 3D array of points for the block.
"""
logger = logging.getLogger(__name__)
logger.info("Block '%s' contains %d points (%d x %d x %d)." % (self.name, self.num_x*self.num_y*self.num_z, self.num_x, self.num_y, self.num_z,))
x1 = numpy.linspace(0.0, self.res_horiz*(self.num_x-1), self.num_x)
y1 = numpy.linspace(0.0, self.res_horiz*(self.num_y-1), self.num_y)
z1 = numpy.linspace(0.0, self.res_vert*(self.num_z-1), self.num_z)
x,y,z = numpy.meshgrid(x1, y1, z1)
domain_top = 0.0
domain_bot = -self.domain_z
if topography is not None:
topoG = self._getBlockTopography(topography)
for i in xrange(self.num_z):
z[:,:,i] = domain_bot + (topoG-domain_bot)/(domain_top-domain_bot)*(self.z_top - z[:,:,i] - domain_bot)
# Move top points down
z[:,:,0] += self.z_top_offset
else:
z = self.z_top - z
xyzG = numpy.vstack((x.ravel(), y.ravel(), z.ravel(),)).transpose()
xyzP = numpy.zeros(xyzG.shape)
azRad = y_azimuth * math.pi / 180.0
xyzP[:,0] = origin_x + xyzG[:,0]*math.cos(azRad) + xyzG[:,1]*math.sin(azRad)
xyzP[:,1] = origin_y - xyzG[:,0]*math.sin(azRad) + xyzG[:,1]*math.cos(azRad)
xyzP[:,2] = xyzG[:,2]
return xyzP
def groundsurf_xy(self, y_azimuth, origin_x, origin_y):
"""Create array of points on the ground surface spanning the block.
:return: 2D array of points on the ground surface.
"""
logger = logging.getLogger(__name__)
logger.info("Topography surface contains %d x %d points." % (self.num_x, self.num_y,))
x1 = numpy.linspace(0.0, self.res_horiz*(self.num_x-1), self.num_x)
y1 = numpy.linspace(0.0, self.res_horiz*(self.num_y-1), self.num_y)
x,y = numpy.meshgrid(x1, y1)
xyG = numpy.vstack((x.ravel(), y.ravel(),)).transpose()
xyP = numpy.zeros(xyG.shape)
azRad = y_azimuth*math.pi/180.0
xyP[:,0] = origin_x + xyG[:,0]*math.cos(azRad) + xyG[:,1]*math.sin(azRad)
xyP[:,1] = origin_y - xyG[:,0]*math.sin(azRad) + xyG[:,1]*math.cos(azRad)
return xyP
def _getBlockTopography(self, topography):
"""Get topography grid for block.
"""
npts = topography.shape[0]
n = self.num_x - 1
m = self.num_y - 1
skip = int((-(n+m)+((n+m)**2-4*n*m*(1-npts))**0.5)/(2*n*m))
if (n*skip+1)*(m*skip+1) != npts:
raise ValueError("Nonintegral number of points for block relative to topography. Topography has %d points. Block has %d x %d points." % (npts, self.num_x, self.num_y,))
topoG = topography.reshape((m*skip+1, n*skip+1,))
topoG = topoG[::skip,::skip]
return topoG
class Model(object):
"""Georeferenced model composed of uniformly gridded blocks.
"""
def __init__(self):
"""Constructor.
"""
self.config = None
self.description = None
self.key = None
self.topography = None
return
def initialize(self, config_filename, data_dir=None):
"""Initialize the model.
:param config_filename: Name of model configuration (INI format) file.
"""
import ConfigParser
parser = ConfigParser.ConfigParser()
parser.read(config_filename)
self.description = parser.get("geomodelgrids", "description")
self.key = parser.get("geomodelgrids", "key")
self.config = parser._sections
self.domain_x = parser.getfloat("domain", "dim_x")
self.domain_y = parser.getfloat("domain", "dim_y")
self.domain_z = parser.getfloat("domain", "dim_z")
self.y_azimuth = parser.getfloat("coordsys", "y_azimuth")
self.origin_x = parser.getfloat("coordsys", "origin_x")
self.origin_y = parser.getfloat("coordsys", "origin_y")
self.blocks = []
for block_name in self._config_list(parser.get("domain", "blocks")):
res_horiz = parser.getfloat(block_name, "res_horiz")
res_vert = parser.getfloat(block_name, "res_vert")
z_top = parser.getfloat(block_name, "z_top")
z_top_offset = parser.getfloat(block_name, "z_top_offset")
z_bot = parser.getfloat(block_name, "z_bot")
self.blocks.append(Block(block_name, res_horiz, res_vert, z_top, z_bot, z_top_offset, self.domain_x, self.domain_y, self.domain_z))
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
self.data_dir = data_dir
return
def write_surfxy(self):
import os
import datetime
block = self.blocks[0]
header = (
"Generated with %(script)s by %(user)s on %(date)s.\n"
"\n"
"XY grid for topography\n"
"\n"
"Model: %(model)s\n"
"res_horiz: %(res_horiz).1f m\n"
"num_x: %(num_x)d\n"
"num_y: %(num_y)d\n"
% {"script": __file__,
"user": os.environ["USER"],
"date": datetime.datetime.now(),
"model": self.key,
"res_horiz": block.res_horiz,
"num_x": block.num_x,
"num_y": block.num_y,
},)
filename = "%s/%s-topo-xy.txt.gz" % (self.data_dir, self.key,)
points = block.groundsurf_xy(self.y_azimuth, self.origin_x, self.origin_y)
numpy.savetxt(filename, points, fmt="%16.8e", header=header[0])
return
def write_blocks(self):
import os
import datetime
self._loadTopography()
if "external_z_units" in self.config["domain"]:
z_units = self.config["domain"]["external_z_units"]
else:
z_units = "m"
for block in self.blocks:
header = (
"Generated with %(script)s by %(user)s on %(date)s.\n"
"\n"
"Model: %(model)s\n"
"Block: %(block)s\n"
"res_horiz: %(res_horiz).1f m\n"
"res_vert: %(res_vert).1f m\n"
"z_top: %(z_top).1f m\n"
"z_top_offset: %(z_top_offset).1f m\n"
"num_x: %(num_x)d\n"
"num_y: %(num_y)d\n"
"num_z: %(num_z)d\n"
"z_units: %(z_units)s\n"
% {"script": __file__,
"user": os.environ["USER"],
"date": datetime.datetime.now(),
"model": self.key,
"block": block.name,
"res_horiz": block.res_horiz,
"res_vert": block.res_vert,
"z_top": block.z_top,
"z_top_offset": block.z_top_offset,
"num_x": block.num_x,
"num_y": block.num_y,
"num_z": block.num_z,
"z_units": z_units,
},)
points = block.points(self.y_azimuth, self.origin_x, self.origin_y, self.topography)
# Convert to output units
if z_units in ["m", "meter", "meters"]:
pass
elif z_units in ["ft", "feet"]:
points[:,2] /= 0.3048
else:
raise ValueError("Unknown units '%s' for external z coordinate." % z_units)
filename = "%s/%s-%s-xyz.txt.gz" % (self.data_dir, self.key, block.name,)
numpy.savetxt(filename, points, fmt="%20.12e, %20.12e, %14.6e", header=header[0])
return
def _loadTopography(self):
if "topography" in self.config["domain"]:
filename = "%s/%s" % (self.data_dir, self.config["domain"]["topography"])
self.topography = numpy.loadtxt(filename)[:,2]
# Convert to meters
if "external_z_units" in self.config["domain"]:
z_units = self.config["domain"]["external_z_units"]
if z_units in ["m", "meter", "meters"]:
pass
elif z_units in ["ft", "feet"]:
self.topography *= 0.3048
else:
raise ValueError("Unknown units '%s' for external z coordinate." % z_units)
else:
self.topography = None
return
def _config_list(self, list_string):
l = [f.strip() for f in list_string[1:-1].split(",")]
return l
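# Illustrative model configuration consumed by Model.initialize(); the section
# and option names mirror the parser.get*() calls above, while the values,
# block names, and file names are hypothetical placeholders.
#
# [geomodelgrids]
# description = Example layered velocity model
# key = example_model
#
# [coordsys]
# y_azimuth = 330.0
# origin_x = 100000.0
# origin_y = 200000.0
#
# [domain]
# dim_x = 75000.0
# dim_y = 45000.0
# dim_z = 45000.0
# blocks = [vres25m, vres125m]
# topography = example_model-topo.txt.gz
# external_z_units = m
#
# [vres25m]
# res_horiz = 100.0
# res_vert = 25.0
# z_top = 0.0
# z_top_offset = -0.1
# z_bot = -5000.0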
class App(object):
"""Create xyz grid files, one file per block with points in physical space.
The intent is that external modeling tools provide the values at
these points. This step can be skipped if the model is already
discretized in a suite of blocks.
"""
def __init__(self, model_config, data_dir):
"""Constructor.
"""
self.model_config = model_config
self.data_dir = data_dir
return
def run(self, points):
"""Generate points in the model blocks.
:param points: If points == "groundsurf", generate points on ground surface. If points == "blocks", generate points in each block.
"""
self.model = Model()
self.model.initialize(self.model_config, self.data_dir)
if points == "groundsurf":
self.model.write_surfxy()
elif points == "blocks":
self.model.write_blocks()
else:
raise ValueError("Unknown value '%s' for points." % points)
return
# ======================================================================
if __name__ == "__main__":
DESCRIPTION = "Application for generating files with xyz grid points "
"associated with the points in the blocks of a grid-based model. One "
"file is written for each block of points."
import argparse
import logging
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument("--data-dir", action="store", dest="data_dir", default=".")
parser.add_argument("--model", action="store", dest="model_config", required=True)
parser.add_argument("--points", action="store", dest="points", choices=["blocks","groundsurf"], required=True)
parser.add_argument("--log", action="store", dest="logFilename", default="generate_points.log")
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG, filename=args.logFilename)
app = App(args.model_config, args.data_dir)
app.run(args.points)
# End of file
| 37.314815
| 180
| 0.551282
|
82ef49538df4672eed7503fdc29ab16b7b293381
| 8,349
|
py
|
Python
|
HW08_Wordle_Test_final.py
|
aishwaryashirbhate/Wordle-project
|
565895b6d687ce0b3d3d89adc358f07d1206c62f
|
[
"MIT"
] | null | null | null |
HW08_Wordle_Test_final.py
|
aishwaryashirbhate/Wordle-project
|
565895b6d687ce0b3d3d89adc358f07d1206c62f
|
[
"MIT"
] | null | null | null |
HW08_Wordle_Test_final.py
|
aishwaryashirbhate/Wordle-project
|
565895b6d687ce0b3d3d89adc358f07d1206c62f
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
import os
import HW08_Aishwarya_Shirbhate_dictionary_final as dict
import HW08_Aishwarya_Shirbhate_wordle_final as w
import HW08_Aishwarya_Shirbhate_ui_final as ui
import HW08_Aishwarya_Shirbhate_occurence_stats_final as oss
import HW08_Aishwarya_Shirbhate_utility_final as uf
class wordle_test(unittest.TestCase):
# @patch('builtins.input', return_value=)
def test_case1(self): #guess is the answer
theAns = "honey"
theGuess = "honey"
# clue = wordGuesser(theAns, theGuess)
self.assertEqual(w.word_guess(theAns, theGuess).wordGuesser()[0], True)
def test_case2(self): #guess is not the answer
theAns = "balls"
theGuess = "babes"
# clue = wordGuesser(theAns, theGuess)
res = w.word_guess(theAns, theGuess).wordGuesser()
self.assertEqual(''.join(res[1]), ' "" ')
def test_case3(self):
theAns = "aaron"
theGuess = "stats" #guess is of multiple repeated characters
# clue = wordGuesser(theAns, theGuess)
res= w.word_guess(theAns, theGuess).wordGuesser()
self.assertEqual(''.join(res[1]), '""`""')
@patch('builtins.input', side_effect=['', "balls"])
def test_case4(self, input): # when you pass empty input
theAns = "balls"
theGuess = ""
number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
obj=ui.UI()
obj.input_check(theAns)
self.assertEqual(obj.get_variables()[0], "exit")
@patch('builtins.input', side_effect=['small', "balls"])
def test_case5(self, input): # when you pass incorrect input
theAns = "balls"
theGuess = "small"
number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
obj = ui.UI()
obj.input_check(theAns)
self.assertEqual(obj.get_variables()[0], "play")
@patch('builtins.input', side_effect=['small', "balls"])
def test_case6(self, input): # when you pass correct input (to check main function)
theAns = "balls"
theGuess = "balls"
number_of_wins = {1: 0, 2: 1, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
obj = ui.UI()
obj.set_variables("play",number_of_wins,history,[])
obj.input_check(theAns)
self.assertEqual(obj.get_variables()[0], "play")
@patch('builtins.input', side_effect=['balls', "balls"])
def test_case7(self, input): # when you pass correct input(check history)
theAns = "balls"
theGuess = "balls"
number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
obj = ui.UI()
obj.set_variables("play", number_of_wins, history, [])
obj.input_check(theAns)
self.assertEqual(obj.get_variables()[1], {'loss': 0, 'won': 1})
@patch('builtins.input', side_effect=['stats', 'human', 'viral', 'along', 'among', 'small'])
def test_case8(self, input): # when user loses game (check history)
theAns = "honey"
theGuess = "balls"
number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
obj = ui.UI()
obj.set_variables("play", number_of_wins, history, [])
obj.input_check(theAns)
self.assertEqual(obj.get_variables()[1]['loss'], 1)
@patch('builtins.input', side_effect=['stats', 'human', 'viral', 'along', 'among', 'small',
'stats', 'human', 'viral', 'along', 'among', 'small',
'stats', 'human', 'viral', 'along', 'among', 'small'])
def test_case9(self, input): # when user plays multiple games and check history is maintained for the same
theAns = "honey"
theGuess = "balls"
number_of_wins = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}
history = {"won": 0, "loss": 0}
obj = ui.UI()
obj.set_variables("play", number_of_wins, history, [])
obj.input_check(theAns)
history = obj.get_variables()[1]
theAns = "viral"
theGuess = "balls"
obj.set_variables("play", number_of_wins, history, [])
obj.input_check(theAns)
history = obj.get_variables()[1]
theAns = "small"
theGuess = "balls"
obj.set_variables("play", number_of_wins, history, [])
obj.input_check(theAns)
history = obj.get_variables()[1]
self.assertEqual(obj.get_variables()[1], {'loss': 1, 'won': 2})
def test_case10(self): # checks if word list contains same number of letters as in word list file
i = 0
with open("word_list.txt") as word_file:
data = word_file.read()
lines = data.split()
i += len(lines)
self.assertEqual(len(dict.dictionary().func_dict()), i, True)
def test_case11(self): # checks if input letter is of length 5
self.assertEqual(len(dict.dictionary().func_englishword()), 5)
@patch('builtins.input', side_effect=[{'loss': 0, 'won': 0} ])
def test_case12(self, input): # checks if stats are displayed correctly if user has lost and won games
history = {'loss': 1, 'won': 1}
obj=ui.UI()
obj.set_variables("play", {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}, history, [])
self.assertEqual(obj.game_stats(), (2, 50.0))
@patch('builtins.input', side_effect=[{'loss': 0, 'won': 0}])
def test_case13(self, input): # checks if stats are displayed correctly if games are won
history = {'loss': 0, 'won': 1}
obj = ui.UI()
obj.set_variables("play", {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}, history, [])
self.assertEqual(obj.game_stats(), (1, 100.0))
@patch('builtins.input', side_effect=[{'loss': 0, 'won': 0}])
def test_case14(self, input): # checks if stats are displayed correctly if games are lost
history = {'loss': 1, 'won': 0}
obj = ui.UI()
obj.set_variables("play", {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}, history, [])
self.assertEqual(obj.game_stats(), (1, 0.0))
def test_case15(self): # checks if all 5-letter words are returned to the function
self.assertEqual(len(uf.Fiveletterwords().five_letter_word()), 1379)
def test_case16(self): # checks if Add_to_freq_file works correctly
tmp_file = 'tmp.csv'
elements = ["a"] + [1, 2, 3, 4, 5]
oss.occ_stats().Add_to_freq_file(tmp_file, elements)
my_file = open("tmp.csv", "r")
content = my_file.read()
self.assertEqual(content, "a,1,2,3,4,5\n")
os.remove(tmp_file)
def test_case17(self): # checks if Add_to_rank_file works correctly
tmp_file = 'tmp1.csv'
elements = [('sales', '327814032000/4986792881682899'), ('bones', '227449999296/4986792881682899')]
oss.occ_stats().Add_to_rank_file(tmp_file, elements)
my_file = open("tmp1.csv", "r")
content = my_file.read()
self.assertEqual(content, "1,sales,327814032000/4986792881682899\n2,bones,227449999296/4986792881682899\n")
os.remove(tmp_file)
def test_case18(self): # checks if Convert_list_to_tuple works correctly
d = {"a": [1, 2, 3, 4, 5]}
obj = oss.occ_stats()
obj.set_variables(d)
self.assertEqual(obj.Convert_list_to_tuple(), {'a': (1, 2, 3, 4, 5)})
def test_case19(self): # checks if Parse_file_to_tuple works correctly
file = 'tmp2.csv'
f = open(file, "a")
f.write("a,1,2,3,4,5")
f.close()
oss.occ_stats().Parse_file_to_tuple(file)
my_file = open("tmp2.csv", "r")
content = my_file.read()
self.assertEqual(content, ("a,1,2,3,4,5"))
os.remove(file)
def test_case20(self): # checks if find_ranks works correctly
words = ["a"]
occur_dict = {'a': (78, 230, 171, 89, 51)}
n = 1379
obj = oss.occ_stats()
obj.set_variables(occur_dict)
a = obj.find_ranks(words, n)
self.assertEqual(a, {'a': '78/4986792881682899'})
if __name__ == '__main__':
unittest.main()
| 41.331683
| 115
| 0.577195
|
51c495053a0a3ce5ea8e40c65df3ed70ec3809ff
| 2,847
|
py
|
Python
|
train.py
|
ikathuria/ASLRecognition
|
87f3ecc7d630c95f7c3bdc9d514f7ed71a6ac2bf
|
[
"MIT"
] | null | null | null |
train.py
|
ikathuria/ASLRecognition
|
87f3ecc7d630c95f7c3bdc9d514f7ed71a6ac2bf
|
[
"MIT"
] | 3
|
2021-03-14T09:17:33.000Z
|
2021-03-14T09:25:57.000Z
|
train.py
|
ikathuria/ASLRecognition
|
87f3ecc7d630c95f7c3bdc9d514f7ed71a6ac2bf
|
[
"MIT"
] | 1
|
2021-02-25T06:42:36.000Z
|
2021-02-25T06:42:36.000Z
|
"""Training the Convolutional Neural Network."""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten, BatchNormalization
from keras.layers import Dropout
from keras.callbacks import EarlyStopping
# STEP 1: Converting dataset
labels = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I']
mode = "ASL"
dataset_path = f"{mode}_data_processed"
loaded_images = []
for folder in labels:
gesture_path = os.path.join(dataset_path, folder)
k = 0
for img in os.listdir(gesture_path):
image = cv2.imread(os.path.join(gesture_path, img))
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = cv2.resize(gray_image, (100, 100))
loaded_images.append(gray_image)
k += 1
print("Total images in dataset:", len(loaded_images))
outputVectors = []
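# Note: the one-hot loop below reuses k from the last folder processed above,
# so it implicitly assumes every gesture folder contains the same number of images.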
for i in range(9):
temp = [0, 0, 0, 0, 0, 0, 0, 0, 0]
temp[i] = 1
for _ in range(0, k):
outputVectors.append(temp)
print("Output vector length:", len(outputVectors))
X = np.asarray(loaded_images)
y = np.asarray(outputVectors)
print("X shape:", X.shape)
print("y shape:", y.shape)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42)
X_train = X_train.reshape(X_train.shape[0], 100, 100, 1)
X_test = X_test.reshape(X_test.shape[0], 100, 100, 1)
print("Number of training images:", X_train.shape)
print("Number of test images:", X_test.shape)
print("---"*25, "\n\n\n")
# STEP 2: Model
model = Sequential()
# first conv layer
# input shape = (img_rows, img_cols, 3)
model.add(Conv2D(32, kernel_size=(3, 3),
activation="relu", input_shape=(100, 100, 1)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# second conv layer
model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.30))
# flatten and put a fully connected layer
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.40))
# softmax layer
model.add(Dense(9, activation="softmax"))
# compile model
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["categorical_accuracy"],
)
model.summary()
# STEP 3: Fit the model
print("\nSTARTING TRAINING!\n\n")
# check for overfitting
es_callback = EarlyStopping(monitor='val_loss', patience=3)
model.fit(
X_train,
y_train,
batch_size=10,
epochs=50,
verbose=1,
validation_data=(X_test, y_test),
callbacks=[es_callback],
)
# STEP 4: Save the model
model.save(f"{mode}_model.h5")
print("Model saved")
| 25.419643
| 68
| 0.692659
|
1fc1e5c793d75b4483f1c284678826e9b661bdb1
| 5,495
|
py
|
Python
|
gst-python-samples/Gstreamer-Applications/gst_audio/audio_record.py
|
raikonenfnu/sample-apps-for-Qualcomm-Robotics-RB5-platform
|
61b56352ce0c9a98afa67fff57bd40c4d319c721
|
[
"BSD-3-Clause-Clear"
] | 30
|
2020-10-01T07:53:25.000Z
|
2022-03-25T07:36:32.000Z
|
gst-python-samples/Gstreamer-Applications/gst_audio/audio_record.py
|
raikonenfnu/sample-apps-for-Qualcomm-Robotics-RB5-platform
|
61b56352ce0c9a98afa67fff57bd40c4d319c721
|
[
"BSD-3-Clause-Clear"
] | 43
|
2020-10-13T01:39:42.000Z
|
2022-03-29T02:13:43.000Z
|
gst-python-samples/Gstreamer-Applications/gst_audio/audio_record.py
|
raikonenfnu/sample-apps-for-Qualcomm-Robotics-RB5-platform
|
61b56352ce0c9a98afa67fff57bd40c4d319c721
|
[
"BSD-3-Clause-Clear"
] | 19
|
2020-10-19T19:28:12.000Z
|
2022-03-30T07:08:22.000Z
|
import sys
import gi
import logging
import os
gi.require_version("GLib", "2.0")
gi.require_version("GObject", "2.0")
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib, GObject
logging.basicConfig(level=logging.DEBUG,
format="[%(name)s] [%(levelname)8s] - %(message)s")
logger = logging.getLogger(__name__)
if __name__ == "__main__":
# Initialize GStreamer passing command line argument
Gst.init(sys.argv)
loop = GLib.MainLoop()
if len(sys.argv) < 2:
print("Missing <output-location> parameter")
sys.exit(1)
file_format = os.path.splitext(sys.argv[1])[1]
# Create the empty pipeline
pipeline = Gst.Pipeline.new("audio record")
pulsesrc = Gst.ElementFactory.make("pulsesrc", "pulseaudio_src")
audioconvert = Gst.ElementFactory.make("audioconvert", "audio_convert")
capsfilter = Gst.ElementFactory.make("capsfilter", "caps_filter")
queue = Gst.ElementFactory.make("queue", "queue")
filesink = Gst.ElementFactory.make("filesink", "file_sink")
if not pipeline or not pulsesrc or not audioconvert or not capsfilter or not queue or not filesink:
print("Create element failed")
sys.exit(1)
pulsesrc.set_property('num-buffers', 1000)
pulsesrc.set_property('buffer-time', 30000)
caps=Gst.Caps.new_empty_simple("audio/x-raw")
capsfilter.set_property('caps', caps)
filesink.set_property('location', sys.argv[1])
if file_format == ".wav":
wavenc = Gst.ElementFactory.make("wavenc", "wav_enc")
if not wavenc:
print("Create element failed.\n")
sys.exit(1)
pipeline.add(pulsesrc)
pipeline.add(capsfilter)
pipeline.add(audioconvert)
pipeline.add(wavenc)
pipeline.add(queue)
pipeline.add(filesink)
if not pulsesrc.link(capsfilter):
print("ERROR: Could not link pulsesrc to capsfilter")
sys.exit(1)
if not capsfilter.link(audioconvert):
print("ERROR: Could not link capsfilter to audioconvert")
sys.exit(1)
if not audioconvert.link(wavenc):
print("ERROR: Could not link audioconvert to wavenc")
sys.exit(1)
if not wavenc.link(queue):
print("ERROR: Could not link wavenc to queue")
sys.exit(1)
if not queue.link(filesink):
print("ERROR: Could not link queue to filesink")
sys.exit(1)
elif file_format == ".aac":
avenc_aac = Gst.ElementFactory.make("avenc_aac", "aac_enc")
aacparse = Gst.ElementFactory.make("aacparse", "aac_parse")
aac_caps = Gst.ElementFactory.make("capsfilter", "aac_caps_filter")
if not avenc_aac or not aacparse or not aac_caps:
print("Create element failed.\n")
sys.exit(1)
caps=Gst.Caps.new_empty_simple('audio/mpeg')
aac_caps.set_property('caps', caps)
pipeline.add(pulsesrc)
pipeline.add(capsfilter)
pipeline.add(audioconvert)
pipeline.add(avenc_aac)
pipeline.add(aacparse)
pipeline.add(aac_caps)
pipeline.add(queue)
pipeline.add(filesink)
if not pulsesrc.link(capsfilter):
print("ERROR: Could not link pulsesrc to capsfilter")
sys.exit(1)
if not capsfilter.link(audioconvert):
print("ERROR: Could not link capsfilter to audioconvert")
sys.exit(1)
if not audioconvert.link(avenc_aac):
print("ERROR: Could not link audioconvert to avenc_aac")
sys.exit(1)
if not avenc_aac.link(aacparse):
print("ERROR: Could not link avenc_aac to aacparse")
sys.exit(1)
if not aacparse.link(aac_caps):
print("ERROR: Could not link aacparse to aac_caps")
sys.exit(1)
if not aac_caps.link(queue):
print("ERROR: Could not link aac_caps to queue")
sys.exit(1)
if not queue.link(filesink):
print("ERROR: Could not link queue to filesink")
sys.exit(1)
elif file_format == ".mp3":
lamemp3enc = Gst.ElementFactory.make("lamemp3enc", "mp3_enc")
if not lamemp3enc:
print("Create element failed.\n")
sys.exit(1)
pipeline.add(pulsesrc)
pipeline.add(capsfilter)
pipeline.add(audioconvert)
pipeline.add(lamemp3enc)
pipeline.add(queue)
pipeline.add(filesink)
if not pulsesrc.link(capsfilter):
print("ERROR: Could not link pulsesrc to capsfilter")
sys.exit(1)
if not capsfilter.link(audioconvert):
print("ERROR: Could not link capsfilter to audioconvert")
sys.exit(1)
if not audioconvert.link(lamemp3enc):
print("ERROR: Could not link audioconvert to lamemp3enc")
sys.exit(1)
if not lamemp3enc.link(queue):
print("ERROR: Could not link lamemp3enc to queue")
sys.exit(1)
if not queue.link(filesink):
print("ERROR: Could not link queue to filesink")
sys.exit(1)
else:
print("Format %s not supported\n", format);
sys.exit(1)
pipeline.set_state(Gst.State.PLAYING)
print("Audio record started");
bus = pipeline.get_bus()
bus_id = bus.add_signal_watch()
msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,Gst.MessageType.ERROR | Gst.MessageType.EOS)
if msg:
t = msg.type
if t == Gst.MessageType.ERROR:
err, dbg = msg.parse_error()
print("ERROR:", msg.src.get_name(), ":", err.message)
if dbg:
print("debugging info:", dbg)
elif t == Gst.MessageType.EOS:
print("stopped recording")
else:
print("ERROR: Unexpected message received.")
pipeline.set_state(Gst.State.NULL)
Gst.Object.unref(bus)
Gst.deinit()
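# Illustrative invocation (assumed; the output file name is hypothetical). The
# extension of the single positional argument selects the encoder branch above:
#   python3 audio_record.py capture.wav   # wavenc
#   python3 audio_record.py capture.aac   # avenc_aac + aacparse
#   python3 audio_record.py capture.mp3   # lamemp3enc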
| 30.192308
| 99
| 0.666424
|
de38db859fa04c7fe69b403a6cafbd7b10dfd02b
| 633
|
py
|
Python
|
tree.py
|
jdowner/tree
|
f85ba0423a2877e1b2c63ee4c426acf2e1aab3e2
|
[
"MIT"
] | null | null | null |
tree.py
|
jdowner/tree
|
f85ba0423a2877e1b2c63ee4c426acf2e1aab3e2
|
[
"MIT"
] | null | null | null |
tree.py
|
jdowner/tree
|
f85ba0423a2877e1b2c63ee4c426acf2e1aab3e2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import networkx as nx
graph = nx.DiGraph(["ac", "cd", "ce", "ab", 'ef','dg', 'dh'])
def tree(graph):
def recurse(node, padding, last=False):
if last:
print(u"{}└── {}".format(padding[:-4], node))
else:
print(u"{}├── {}".format(padding[:-4], node))
children = graph.successors(node)
if children:
for child in children[:-1]:
recurse(child, padding + u"│ ", last=False)
recurse(children[-1], padding + u" ", last=True)
recurse(graph.nodes()[0], u" ", last=True)
tree(graph)
| 24.346154
| 63
| 0.507109
|
38c1c316f2c8173a20cd2492693e96e4f86b7119
| 7,446
|
py
|
Python
|
utils.py
|
Zhang-Shubo/ai_nlp_model_zoo
|
1fe384463cafbef89f4b0d27917ac94f2ff2c6f1
|
[
"MIT"
] | null | null | null |
utils.py
|
Zhang-Shubo/ai_nlp_model_zoo
|
1fe384463cafbef89f4b0d27917ac94f2ff2c6f1
|
[
"MIT"
] | null | null | null |
utils.py
|
Zhang-Shubo/ai_nlp_model_zoo
|
1fe384463cafbef89f4b0d27917ac94f2ff2c6f1
|
[
"MIT"
] | null | null | null |
# time: 2021/4/28 22:50
# File: utils.py
# Author: zhangshubo
# Mail: supozhang@126.com
import json
import os
import random
import torch
_bert_token_dict = json.loads(open("data/bert/bert-base-chinese/tokenizer.json", encoding="utf-8").read())["model"][
"vocab"]
def read_nlpcc_text(path):
with open(path, "r", encoding="utf-8") as fd:
while True:
line = fd.readline()
if not line:
break
label, text = line.strip("\n").split("\t")
yield text.replace(" ", ""), label
def read_cluener_text(path):
def transfer(da):
word_list = list(da["text"])
ner_list = ["O"] * len(word_list)
for ner_type, value_dict in da["label"].items():
for words, position_list in value_dict.items():
for position in position_list:
if position[0] == position[1]:
ner_list[position[0]] = ner_type.upper() + "_S"
else:
ner_list[position[0]] = ner_type.upper() + "_B"
for pos in range(position[0] + 1, position[1] + 1):
ner_list[pos] = ner_type.upper() + "_M"
return word_list, ner_list
with open(path, "r", encoding="utf-8") as fd:
while True:
line = fd.readline()
if not line:
break
data = json.loads(line)
yield transfer(data)
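# Illustrative transfer() result for a hypothetical line in the CLUENER json format:
# {"text": "小明在北京", "label": {"name": {"小明": [[0, 1]]}}}
# -> (["小", "明", "在", "北", "京"], ["NAME_B", "NAME_M", "O", "O", "O"])
# Single-character spans (position[0] == position[1]) are tagged TYPE_S instead.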
class VocabDict(dict):
def __init__(self, inputs, save_path="data/classifier/vocab.txt"):
super(VocabDict, self).__init__({"<PAD>": 0, "<UNK>": 1})
self.data = ["<PAD>", "<UNK>"]
self.save_path = save_path
self.weights = [[0.0] * 200, [random.random() for _ in range(200)]]
if not self.load():
self.traverse(inputs)
def load(self):
if not os.path.exists(self.save_path):
return
with open(self.save_path, encoding="utf-8") as fd:
while True:
line = fd.readline()
if not line:
break
self.data.append(line.strip())
for i, word in enumerate(self.data):
self[word] = i
return True
def load_pretrained_vocab(self, path):
if not os.path.exists(path):
return
with open(path, encoding="utf-8") as fd:
while True:
line = fd.readline()
if not line:
break
blocks = line.strip().split(" ")
self.data.append(blocks[0])
self.weights.append(list(map(float, blocks[1:])))
for i, word in enumerate(self.data):
self[word] = i
return self
def save(self):
with open(self.save_path, "w", encoding="utf-8") as fd:
for word in self.data:
fd.write(word + "\n")
def traverse(self, inputs):
for line in inputs:
for w in line:
if w not in self:
self[w] = len(self)
self.data.append(w)
def lookup(self, seq):
res = []
for char in seq:
if char in self:
res.append(self[char])
else:
res.append(self["<UNK>"])
return res
class LabelDict(dict):
def __init__(self, labels, save_path="data/classifier/label.txt", sequence=False):
super(LabelDict, self).__init__()
self.data = []
self.save_path = save_path
self.is_sequence = sequence
if self.is_sequence:
self["O"] = 0
self.data.append("O")
if not self.load():
self.traverse(labels)
def load(self):
if not os.path.exists(self.save_path):
return
with open(self.save_path, encoding="utf-8") as fd:
while True:
line = fd.readline()
if not line:
break
self.data.append(line.strip())
for i, label in enumerate(self.data):
self[label] = i
return True
def save(self):
with open(self.save_path, "w", encoding="utf-8") as fd:
for label in self.data:
fd.write(label + "\n")
def traverse(self, labels):
for label in labels:
if not self.is_sequence:
if label not in self:
self[label] = len(self)
self.data.append(label)
else:
for tag in label:
if tag not in self:
self[tag] = len(self)
self.data.append(tag)
def lookup(self, label, begin=False, end=False):
"""
Convert labels to integer indices.
"""
if self.is_sequence:
res = []
if begin:
res.append(self["O"])
for tag in label:
if tag in self:
res.append(self[tag])
if end:
res.append(self["O"])
return res
return self[label]
def refactor(self, predict):
"""
Convert integer indices back to labels.
:param predict:
:return:
"""
if not self.is_sequence:
return self.data[predict]
else:
res = []
for tag_idx in predict:
res.append(self.data[tag_idx])
return res
def sequence_padding(seq, max_len, pos="post", pad_idx=0):
z = [pad_idx] * max_len
if len(seq) > max_len:
seq = seq[:max_len]
if pos == "post":
z[:len(seq)] = seq
else:
z[-len(seq):] = seq
return z
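# Illustrative behaviour of sequence_padding (values are made up):
#   sequence_padding([7, 8, 9], 5)             -> [7, 8, 9, 0, 0]
#   sequence_padding([7, 8, 9], 5, pos="pre")  -> [0, 0, 7, 8, 9]
#   sequence_padding([7, 8, 9, 1, 2, 3], 5)    -> [7, 8, 9, 1, 2]   # truncated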
def char_tokenizer(batch_x, lookup_f, max_len, device, padding_pos="post"):
batch_x = list(map(lambda x: sequence_padding(x, max_len, pos=padding_pos), map(lookup_f, batch_x)))
batch_x = torch.tensor(batch_x, dtype=torch.long).to(device)
return batch_x
def bert_tokenizer(batch_x, max_len, device):
def lookup(x):
res = [_bert_token_dict["[CLS]"]]
for char in x:
if char in _bert_token_dict:
res.append(_bert_token_dict[char])
else:
res.append(_bert_token_dict["[UNK]"])
res.append(_bert_token_dict["[SEP]"])
return res
batch_x = list(map(lambda x: sequence_padding(x, max_len), map(lookup, batch_x)))
batch_x = torch.tensor(batch_x, dtype=torch.long).to(device)
return batch_x
def extra_tencent_embedding(path):
res = []
with open(path, encoding="utf-8") as fd:
while True:
line = fd.readline()
if not line:
break
if len(line.strip().split(" ")[0]) == 1:
res.append(line)
with open("data/tencent_char_embedding.txt", "w", encoding="utf-8") as fd:
for line in res:
fd.write(line)
def add_weight_decay(net, l2_value, skip_list=()):
decay, no_decay = [], []
for name, param in net.named_parameters():
if not param.requires_grad: continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [{'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': l2_value}]
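# Minimal usage sketch for add_weight_decay (added example, not in the original
# module): feeds the two parameter groups into a torch optimizer so biases and
# 1-D parameters skip L2 regularization. The learning rate and decay values are
# hypothetical defaults.
def build_optimizer(net, lr=1e-3, l2_value=1e-4):
    return torch.optim.Adam(add_weight_decay(net, l2_value), lr=lr)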
# extra_tencent_embedding(r"E:\tencent_embedding\Tencent_AILab_ChineseEmbedding.txt")
| 31.025
| 116
| 0.520682
|
ccc1ddcce371e88b15292df250f1f5cee4dd1481
| 6,590
|
py
|
Python
|
util.py
|
michaelbeyhs/craigCrawler
|
23246a067d15b8bf2cb1f367716b7d57ee3f3afa
|
[
"MIT"
] | null | null | null |
util.py
|
michaelbeyhs/craigCrawler
|
23246a067d15b8bf2cb1f367716b7d57ee3f3afa
|
[
"MIT"
] | null | null | null |
util.py
|
michaelbeyhs/craigCrawler
|
23246a067d15b8bf2cb1f367716b7d57ee3f3afa
|
[
"MIT"
] | null | null | null |
import settings
import math
from walkscore.api import WalkScore
colorGradient = [
"#FF0000",
"#FF2300",
"#FF4600",
"#FF6900",
"#FF8C00",
"#FFAF00",
"#FFD300",
"#FFF600",
"#E5FF00",
"#C2FF00",
"#9FFF00",
"#7CFF00",
"#58FF00",
"#35FF00",
"#12FF00",
]
numOfColoGradients = 15
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
def coord_distance(lat1, lon1, lat2, lon2):
"""
Finds the distance between two pairs of latitude and longitude.
:param lat1: Point 1 latitude.
:param lon1: Point 1 longitude.
:param lat2: Point two latitude.
:param lon2: Point two longitude.
:return: Kilometer distance.
"""
lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
c = 2 * math.asin(math.sqrt(a))
km = 6367 * c
return km
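# Quick sanity check with illustrative coordinates: downtown San Francisco
# (37.7749, -122.4194) to downtown Oakland (37.8044, -122.2712) comes out at
# roughly 13 km with this haversine formula.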
def in_box(coords, box):
"""
Find if a coordinate tuple is inside a bounding box.
:param coords: Tuple containing latitude and longitude.
:param box: Two tuples, where first is the bottom left, and the second is the top right of the box.
:return: Boolean indicating if the coordinates are in the box.
"""
if box[0][0] < coords[0] < box[1][0] and box[1][1] < coords[1] < box[0][1]:
return True
return False
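# Note on the box convention used above: with the comparison as written,
# box[0] holds (southern latitude, eastern longitude) and box[1] holds
# (northern latitude, western longitude). Illustrative values:
#   box = ((37.70, -122.35), (37.82, -122.52))
#   in_box((37.77, -122.42), box)  # -> True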
def post_listing_to_slack(sc, listing):
"""
Posts the listing to slack.
:param sc: A slack client.
:param listing: A record of the listing.
"""
priceGrade = int(numOfColoGradients-(float(listing["price"][1:])-settings.MIN_PRICE)/((settings.MAX_PRICE-settings.MIN_PRICE))*numOfColoGradients)
walkGrade = int((float(listing["walkscore"])-settings.MIN_WALKSCORE)/(90-settings.MIN_WALKSCORE)*numOfColoGradients)
distGrade = int(numOfColoGradients-(float(listing["bart_dist"])-0)/(settings.IDEAL_TRANSIT_DIST-0)*numOfColoGradients)
priceGrade = clamp(priceGrade,0,numOfColoGradients-1)
walkGrade = clamp(walkGrade,0,numOfColoGradients-1)
distGrade = clamp(distGrade,0,numOfColoGradients-1)
walkscoreColor = colorGradient[walkGrade]
priceColor = colorGradient[priceGrade]
distColor = colorGradient[distGrade]
attach_json = [
{
"fallback": "Required plain-text summary of the attachment.",
"color": "" + priceColor,
"author_name": "" + listing["area"],
"title": "*" + listing["price"] + " - " + listing["name"] + "*",
"title_link": ""+ listing["url"],
#"text": "Cool info here!",
"image_url": ""+listing["img_url"],
#"thumb_url": ""+listing["img_url"],
"footer": "-",
"ts": 123456789
},
{
"color": ""+walkscoreColor,
"fields": [
{
"title": "Walkscore",
"value": "" + str(listing["walkscore"]) + " | <" + listing["ws_link"] + "|Walkscore Link>",
"short": True
}
]
},
{
"color": "" + distColor,
"fields": [
{
"title": "Distance",
"value": "" + str(listing["bart_dist"]) + " - " + listing["bart"] + " | <" +"https://www.google.com/maps/dir/" + listing["bart"] + "/" + str(listing["geotag"][0]) + "," + str(listing["geotag"][1]) + "|Maps>",
"short": True
}
],
}
]
googleLink = "https://www.google.com/maps/dir/" + listing["bart"] + "/" + str(listing["geotag"][0]) + "," + str(listing["geotag"][1])
desc = "{0} | {1} | {2}km - {3} | *{4}* \r\n".format(listing["area"], listing["price"], listing["bart_dist"], listing["bart"], listing["name"])
desc = desc + "<" + listing["url"] + "|Craigslist> | <" + listing["ws_link"] + "|Walkscore " + str(listing["walkscore"]) + "> | <" + googleLink + "|Google Maps>"
sc.api_call(
"chat.postMessage", channel=settings.SLACK_CHANNEL, text=desc,
username='pybot', icon_emoji=':robot_face:', attachments=attach_json
)
#print "posting to Slack \r\n " + desc
def find_points_of_interest(geotag, location):
"""
Find points of interest, like transit, near a result.
:param geotag: The geotag field of a Craigslist result.
:param location: The where field of a Craigslist result. Is a string containing a description of where
the listing was posted.
:return: A dictionary containing annotations.
"""
area_found = False
area = ""
min_dist = None
near_bart = False
bart_dist = "N/A"
bart = ""
# Look to see if the listing is in any of the neighborhood boxes we defined.
for a, coords in settings.BOXES.items():
if in_box(geotag, coords):
area = a
area_found = True
print "------------------listing is in defined GEO Box" + area
# Check to see if the listing is near any transit stations.
for station, coords in settings.TRANSIT_STATIONS.items():
dist = coord_distance(coords[0], coords[1], geotag[0], geotag[1])
if (min_dist is None or dist < min_dist) and dist < settings.MAX_TRANSIT_DIST:
bart = station
near_bart = True
print "listing is close to " + station
if (min_dist is None or dist < min_dist):
bart_dist = round(dist,1)
min_dist = dist
print "Distance is " + str(dist)
# If the listing isn't in any of the boxes we defined, check to see if the string description of the neighborhood
# matches anything in our list of neighborhoods.
if len(area) == 0:
for hood in settings.NEIGHBORHOODS:
if hood in location.lower():
area = hood
print "listing is in defined neighborhood " + hood
return {
"area_found": area_found,
"area": area,
"near_bart": near_bart,
"bart_dist": bart_dist,
"bart": bart
}
def get_walk_score(geotag):
walkscore = WalkScore(settings.WALKSCORE_API_KEY)
address=''
lat=geotag[0]
lon=geotag[1]
response = walkscore.makeRequest(address, lat, lon)
#print(response['walkscore'])
#print(response['description'])
#print(response['ws_link'])
return response['walkscore'], response['ws_link']
| 36.208791
| 232
| 0.568741
|
ca2964a07d716eae8806849527f9eb377aea7ea6
| 1,470
|
py
|
Python
|
test/test_profiler.py
|
gabrielelanaro/profiling
|
8b70a10219dd11868279c8417b8808539d918157
|
[
"BSD-3-Clause"
] | 6
|
2015-05-18T12:58:33.000Z
|
2019-07-31T06:56:29.000Z
|
test/test_profiler.py
|
gabrielelanaro/profiling
|
8b70a10219dd11868279c8417b8808539d918157
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_profiler.py
|
gabrielelanaro/profiling
|
8b70a10219dd11868279c8417b8808539d918157
|
[
"BSD-3-Clause"
] | 2
|
2018-01-04T20:10:04.000Z
|
2021-03-26T06:16:42.000Z
|
# -*- coding: utf-8 -*-
import pytest
from profiling.profiler import Profiler, ProfilerWrapper
from utils import foo, spin
class NullProfiler(Profiler):
def run(self):
yield
class NullProfilerWrapper(ProfilerWrapper):
def run(self):
with self.profiler:
yield
@pytest.fixture
def profiler():
return NullProfiler()
def test_exclude_code(profiler):
foo_code = foo().f_code
with profiler:
assert foo_code not in profiler.stats
profiler.stats.ensure_child(foo_code)
assert foo_code in profiler.stats
profiler.exclude_code(foo_code)
assert foo_code not in profiler.stats
profiler.exclude_code(foo_code)
assert foo_code not in profiler.stats
def test_result(profiler):
__, cpu_time, wall_time = profiler.result()
assert cpu_time == wall_time == 0.0
with profiler:
spin(0.1)
__, cpu_time, wall_time = profiler.result()
assert cpu_time > 0.0
assert wall_time >= 0.1
def test_wrapper(profiler):
wrapper = NullProfilerWrapper(profiler)
assert isinstance(wrapper, Profiler)
assert wrapper.table_class is profiler.table_class
assert wrapper.stats is profiler.stats
__, cpu_time, wall_time = wrapper.result()
assert cpu_time == wall_time == 0.0
with wrapper:
assert wrapper.is_running()
assert profiler.is_running()
assert not wrapper.is_running()
assert not profiler.is_running()
| 24.5
| 56
| 0.690476
|
b93b053e0d5ba7a393da5cea4b7e59281dcff166
| 3,684
|
py
|
Python
|
discord/oggparse.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
discord/oggparse.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
discord/oggparse.py
|
brotherelric/deezcord.py
|
f7419bf2c67c2006702cccc4850cd9332bce00c6
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present 404kuso
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import struct
from typing import TYPE_CHECKING, ClassVar, IO, Generator, Tuple, Optional
from .errors import DiscordException
__all__ = (
'OggError',
'OggPage',
'OggStream',
)
class OggError(DiscordException):
"""An exception that is thrown for Ogg stream parsing errors."""
pass
# https://tools.ietf.org/html/rfc3533
# https://tools.ietf.org/html/rfc7845
class OggPage:
_header: ClassVar[struct.Struct] = struct.Struct('<xBQIIIB')
if TYPE_CHECKING:
flag: int
gran_pos: int
serial: int
pagenum: int
crc: int
segnum: int
def __init__(self, stream: IO[bytes]) -> None:
try:
header = stream.read(struct.calcsize(self._header.format))
self.flag, self.gran_pos, self.serial, \
self.pagenum, self.crc, self.segnum = self._header.unpack(header)
self.segtable: bytes = stream.read(self.segnum)
bodylen = sum(struct.unpack('B'*self.segnum, self.segtable))
self.data: bytes = stream.read(bodylen)
except Exception:
raise OggError('bad data stream') from None
def iter_packets(self) -> Generator[Tuple[bytes, bool], None, None]:
packetlen = offset = 0
partial = True
for seg in self.segtable:
if seg == 255:
packetlen += 255
partial = True
else:
packetlen += seg
yield self.data[offset:offset+packetlen], True
offset += packetlen
packetlen = 0
partial = False
if partial:
yield self.data[offset:], False
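# Lacing recap for iter_packets above (per RFC 3533): a segment value of 255
# means the packet continues into the next segment, so a segment table of
# 255, 255, 10 describes a single 520-byte packet, and a packet whose final
# segment is exactly 255 bytes spills over into the next page (yielded with
# the "complete" flag left False).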
class OggStream:
def __init__(self, stream: IO[bytes]) -> None:
self.stream: IO[bytes] = stream
def _next_page(self) -> Optional[OggPage]:
head = self.stream.read(4)
if head == b'OggS':
return OggPage(self.stream)
elif not head:
return None
else:
raise OggError('invalid header magic')
def _iter_pages(self) -> Generator[OggPage, None, None]:
page = self._next_page()
while page:
yield page
page = self._next_page()
def iter_packets(self) -> Generator[bytes, None, None]:
partial = b''
for page in self._iter_pages():
for data, complete in page.iter_packets():
partial += data
if complete:
yield partial
partial = b''
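# Minimal usage sketch (added example): stream packets out of an Ogg container.
# The file path is hypothetical; any Ogg Opus file prepared for playback would do.
def count_ogg_packets(path: str) -> int:
    with open(path, 'rb') as fp:
        return sum(1 for _ in OggStream(fp).iter_packets())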
| 32.034783
| 77
| 0.639251
|
802e43aa878cf9c654dc6becd6370fbddf336fc2
| 32,987
|
py
|
Python
|
environ/test.py
|
jbagot/django-environ
|
ecda4bd0189ddd6a5faa5de044df5def47e6ca38
|
[
"MIT"
] | null | null | null |
environ/test.py
|
jbagot/django-environ
|
ecda4bd0189ddd6a5faa5de044df5def47e6ca38
|
[
"MIT"
] | null | null | null |
environ/test.py
|
jbagot/django-environ
|
ecda4bd0189ddd6a5faa5de044df5def47e6ca38
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import sys
import unittest
import warnings
from .compat import (
json, DJANGO_POSTGRES, ImproperlyConfigured, REDIS_DRIVER, quote
)
from environ import Env, Path
class BaseTests(unittest.TestCase):
URL = 'http://www.google.com/'
POSTGRES = 'postgres://uf07k1:wegauwhg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722'
MYSQL = 'mysql://bea6eb0:69772142@us-cdbr-east.cleardb.com/heroku_97681?reconnect=true'
MYSQLGIS = 'mysqlgis://user:password@127.0.0.1/some_database'
SQLITE = 'sqlite:////full/path/to/your/database/file.sqlite'
ORACLE_TNS = 'oracle://user:password@sid/'
ORACLE = 'oracle://user:password@host:1521/sid'
CUSTOM_BACKEND = 'custom.backend://user:password@example.com:5430/database'
REDSHIFT = 'redshift://user:password@examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com:5439/dev'
MEMCACHE = 'memcache://127.0.0.1:11211'
REDIS = 'rediscache://127.0.0.1:6379/1?client_class=django_redis.client.DefaultClient&password=secret'
EMAIL = 'smtps://user@domain.com:password@smtp.example.com:587'
JSON = dict(one='bar', two=2, three=33.44)
DICT = dict(foo='bar', test='on')
PATH = '/home/dev'
EXPORTED = 'exported var'
@classmethod
def generateData(cls):
return dict(STR_VAR='bar',
MULTILINE_STR_VAR='foo\\nbar',
STR_RE_VAR='bar-1.0.0',
INT_VAR='42',
FLOAT_VAR='33.3',
FLOAT_COMMA_VAR='33,3',
FLOAT_STRANGE_VAR1='123,420,333.3',
FLOAT_STRANGE_VAR2='123.420.333,3',
BOOL_TRUE_VAR='1',
BOOL_TRUE_VAR2='True',
BOOL_FALSE_VAR='0',
BOOL_FALSE_VAR2='False',
PROXIED_VAR='$STR_VAR',
INT_LIST='42,33',
INT_TUPLE='(42,33)',
STR_LIST_WITH_SPACES=' foo, bar',
EMPTY_LIST='',
DICT_VAR='foo=bar,test=on',
DATABASE_URL=cls.POSTGRES,
DATABASE_MYSQL_URL=cls.MYSQL,
DATABASE_MYSQL_GIS_URL=cls.MYSQLGIS,
DATABASE_SQLITE_URL=cls.SQLITE,
DATABASE_ORACLE_URL=cls.ORACLE,
DATABASE_ORACLE_TNS_URL=cls.ORACLE_TNS,
DATABASE_REDSHIFT_URL=cls.REDSHIFT,
DATABASE_CUSTOM_BACKEND_URL=cls.CUSTOM_BACKEND,
CACHE_URL=cls.MEMCACHE,
CACHE_REDIS=cls.REDIS,
EMAIL_URL=cls.EMAIL,
URL_VAR=cls.URL,
JSON_VAR=json.dumps(cls.JSON),
PATH_VAR=cls.PATH,
EXPORTED_VAR=cls.EXPORTED)
def setUp(self):
self._old_environ = os.environ
os.environ = Env.ENVIRON = self.generateData()
self.env = Env()
def tearDown(self):
os.environ = self._old_environ
def assertTypeAndValue(self, type_, expected, actual):
self.assertEqual(type_, type(actual))
self.assertEqual(expected, actual)
class EnvTests(BaseTests):
def test_not_present_with_default(self):
self.assertEqual(3, self.env('not_present', default=3))
def test_not_present_without_default(self):
self.assertRaises(ImproperlyConfigured, self.env, 'not_present')
def test_contains(self):
self.assertTrue('STR_VAR' in self.env)
self.assertTrue('EMPTY_LIST' in self.env)
self.assertFalse('I_AM_NOT_A_VAR' in self.env)
def test_str(self):
self.assertTypeAndValue(str, 'bar', self.env('STR_VAR'))
self.assertTypeAndValue(str, 'bar', self.env.str('STR_VAR'))
self.assertTypeAndValue(str, 'foo\\nbar', self.env.str('MULTILINE_STR_VAR'))
self.assertTypeAndValue(str, 'foo\nbar', self.env.str('MULTILINE_STR_VAR', multiline=True))
def test_re(self):
self.assertTypeAndValue(str, '1.0.0', self.env.re('STR_RE_VAR', r'\d+.\d+.\d+'))
self.assertTypeAndValue(str, 'foo', self.env.re('MULTILINE_STR_VAR', r'\w+'))
self.assertTypeAndValue(str, 'bar', self.env.re('STR_VAR', r'\d+'))
def test_bytes(self):
self.assertTypeAndValue(bytes, b'bar', self.env.bytes('STR_VAR'))
def test_int(self):
self.assertTypeAndValue(int, 42, self.env('INT_VAR', cast=int))
self.assertTypeAndValue(int, 42, self.env.int('INT_VAR'))
def test_int_with_none_default(self):
self.assertTrue(self.env('NOT_PRESENT_VAR', cast=int, default=None) is None)
def test_float(self):
self.assertTypeAndValue(float, 33.3, self.env('FLOAT_VAR', cast=float))
self.assertTypeAndValue(float, 33.3, self.env.float('FLOAT_VAR'))
self.assertTypeAndValue(float, 33.3, self.env('FLOAT_COMMA_VAR', cast=float))
self.assertTypeAndValue(float, 123420333.3, self.env('FLOAT_STRANGE_VAR1', cast=float))
self.assertTypeAndValue(float, 123420333.3, self.env('FLOAT_STRANGE_VAR2', cast=float))
def test_bool_true(self):
self.assertTypeAndValue(bool, True, self.env('BOOL_TRUE_VAR', cast=bool))
self.assertTypeAndValue(bool, True, self.env('BOOL_TRUE_VAR2', cast=bool))
self.assertTypeAndValue(bool, True, self.env.bool('BOOL_TRUE_VAR'))
def test_bool_false(self):
self.assertTypeAndValue(bool, False, self.env('BOOL_FALSE_VAR', cast=bool))
self.assertTypeAndValue(bool, False, self.env('BOOL_FALSE_VAR2', cast=bool))
self.assertTypeAndValue(bool, False, self.env.bool('BOOL_FALSE_VAR'))
def test_proxied_value(self):
self.assertEqual('bar', self.env('PROXIED_VAR'))
def test_int_list(self):
self.assertTypeAndValue(list, [42, 33], self.env('INT_LIST', cast=[int]))
self.assertTypeAndValue(list, [42, 33], self.env.list('INT_LIST', int))
def test_int_tuple(self):
self.assertTypeAndValue(tuple, (42, 33), self.env('INT_LIST', cast=(int,)))
self.assertTypeAndValue(tuple, (42, 33), self.env.tuple('INT_LIST', int))
self.assertTypeAndValue(tuple, ('42', '33'), self.env.tuple('INT_LIST'))
def test_str_list_with_spaces(self):
self.assertTypeAndValue(list, [' foo', ' bar'],
self.env('STR_LIST_WITH_SPACES', cast=[str]))
self.assertTypeAndValue(list, [' foo', ' bar'],
self.env.list('STR_LIST_WITH_SPACES'))
def test_empty_list(self):
self.assertTypeAndValue(list, [], self.env('EMPTY_LIST', cast=[int]))
def test_dict_value(self):
self.assertTypeAndValue(dict, self.DICT, self.env.dict('DICT_VAR'))
def test_dict_parsing(self):
self.assertEqual({'a': '1'}, self.env.parse_value('a=1', dict))
self.assertEqual({'a': 1}, self.env.parse_value('a=1', dict(value=int)))
self.assertEqual({'a': ['1', '2', '3']}, self.env.parse_value('a=1,2,3', dict(value=[str])))
self.assertEqual({'a': [1, 2, 3]}, self.env.parse_value('a=1,2,3', dict(value=[int])))
self.assertEqual({'a': 1, 'b': [1.1, 2.2], 'c': 3},
self.env.parse_value('a=1;b=1.1,2.2;c=3', dict(value=int, cast=dict(b=[float]))))
self.assertEqual({'a': "uname", 'c': "http://www.google.com", 'b': True},
self.env.parse_value('a=uname;c=http://www.google.com;b=True', dict(value=str, cast=dict(b=bool))))
def test_url_value(self):
url = self.env.url('URL_VAR')
self.assertEqual(url.__class__, self.env.URL_CLASS)
self.assertEqual(url.geturl(), self.URL)
self.assertEqual(None, self.env.url('OTHER_URL', default=None))
def test_url_encoded_parts(self):
password_with_unquoted_characters = "#password"
encoded_url = "mysql://user:%s@127.0.0.1:3306/dbname" % quote(password_with_unquoted_characters)
parsed_url = self.env.db_url_config(encoded_url)
self.assertEqual(parsed_url['PASSWORD'], password_with_unquoted_characters)
def test_db_url_value(self):
pg_config = self.env.db()
self.assertEqual(pg_config['ENGINE'], DJANGO_POSTGRES)
self.assertEqual(pg_config['NAME'], 'd8r82722')
self.assertEqual(pg_config['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(pg_config['USER'], 'uf07k1')
self.assertEqual(pg_config['PASSWORD'], 'wegauwhg')
self.assertEqual(pg_config['PORT'], 5431)
mysql_config = self.env.db('DATABASE_MYSQL_URL')
self.assertEqual(mysql_config['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(mysql_config['NAME'], 'heroku_97681')
self.assertEqual(mysql_config['HOST'], 'us-cdbr-east.cleardb.com')
self.assertEqual(mysql_config['USER'], 'bea6eb0')
self.assertEqual(mysql_config['PASSWORD'], '69772142')
self.assertEqual(mysql_config['PORT'], '')
mysql_gis_config = self.env.db('DATABASE_MYSQL_GIS_URL')
self.assertEqual(mysql_gis_config['ENGINE'], 'django.contrib.gis.db.backends.mysql')
self.assertEqual(mysql_gis_config['NAME'], 'some_database')
self.assertEqual(mysql_gis_config['HOST'], '127.0.0.1')
self.assertEqual(mysql_gis_config['USER'], 'user')
self.assertEqual(mysql_gis_config['PASSWORD'], 'password')
self.assertEqual(mysql_gis_config['PORT'], '')
oracle_config = self.env.db('DATABASE_ORACLE_TNS_URL')
self.assertEqual(oracle_config['ENGINE'], 'django.db.backends.oracle')
self.assertEqual(oracle_config['NAME'], 'sid')
self.assertEqual(oracle_config['HOST'], '')
self.assertEqual(oracle_config['USER'], 'user')
self.assertEqual(oracle_config['PASSWORD'], 'password')
self.assertFalse('PORT' in oracle_config)
oracle_config = self.env.db('DATABASE_ORACLE_URL')
self.assertEqual(oracle_config['ENGINE'], 'django.db.backends.oracle')
self.assertEqual(oracle_config['NAME'], 'sid')
self.assertEqual(oracle_config['HOST'], 'host')
self.assertEqual(oracle_config['USER'], 'user')
self.assertEqual(oracle_config['PASSWORD'], 'password')
self.assertEqual(oracle_config['PORT'], '1521')
redshift_config = self.env.db('DATABASE_REDSHIFT_URL')
self.assertEqual(redshift_config['ENGINE'], 'django_redshift_backend')
self.assertEqual(redshift_config['NAME'], 'dev')
self.assertEqual(redshift_config['HOST'], 'examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com')
self.assertEqual(redshift_config['USER'], 'user')
self.assertEqual(redshift_config['PASSWORD'], 'password')
self.assertEqual(redshift_config['PORT'], 5439)
sqlite_config = self.env.db('DATABASE_SQLITE_URL')
self.assertEqual(sqlite_config['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(sqlite_config['NAME'], '/full/path/to/your/database/file.sqlite')
custom_backend_config = self.env.db('DATABASE_CUSTOM_BACKEND_URL')
self.assertEqual(custom_backend_config['ENGINE'], 'custom.backend')
self.assertEqual(custom_backend_config['NAME'], 'database')
self.assertEqual(custom_backend_config['HOST'], 'example.com')
self.assertEqual(custom_backend_config['USER'], 'user')
self.assertEqual(custom_backend_config['PASSWORD'], 'password')
self.assertEqual(custom_backend_config['PORT'], 5430)
def test_cache_url_value(self):
cache_config = self.env.cache_url()
self.assertEqual(cache_config['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(cache_config['LOCATION'], '127.0.0.1:11211')
redis_config = self.env.cache_url('CACHE_REDIS')
self.assertEqual(redis_config['BACKEND'], 'django_redis.cache.RedisCache')
self.assertEqual(redis_config['LOCATION'], 'redis://127.0.0.1:6379/1')
self.assertEqual(redis_config['OPTIONS'], {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': 'secret',
})
def test_email_url_value(self):
email_config = self.env.email_url()
self.assertEqual(email_config['EMAIL_BACKEND'], 'django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(email_config['EMAIL_HOST'], 'smtp.example.com')
self.assertEqual(email_config['EMAIL_HOST_PASSWORD'], 'password')
self.assertEqual(email_config['EMAIL_HOST_USER'], 'user@domain.com')
self.assertEqual(email_config['EMAIL_PORT'], 587)
self.assertEqual(email_config['EMAIL_USE_TLS'], True)
def test_json_value(self):
self.assertEqual(self.JSON, self.env.json('JSON_VAR'))
def test_path(self):
root = self.env.path('PATH_VAR')
self.assertTypeAndValue(Path, Path(self.PATH), root)
def test_smart_cast(self):
self.assertEqual(self.env.get_value('STR_VAR', default='string'), 'bar')
self.assertEqual(self.env.get_value('BOOL_TRUE_VAR', default=True), True)
self.assertEqual(self.env.get_value('BOOL_FALSE_VAR', default=True), False)
self.assertEqual(self.env.get_value('INT_VAR', default=1), 42)
self.assertEqual(self.env.get_value('FLOAT_VAR', default=1.2), 33.3)
def test_exported(self):
self.assertEqual(self.EXPORTED, self.env('EXPORTED_VAR'))
class FileEnvTests(EnvTests):
def setUp(self):
super(FileEnvTests, self).setUp()
Env.ENVIRON = {}
self.env = Env()
file_path = Path(__file__, is_file=True)('test_env.txt')
self.env.read_env(file_path, PATH_VAR=Path(__file__, is_file=True).__root__)
class SubClassTests(EnvTests):
def setUp(self):
super(SubClassTests, self).setUp()
self.CONFIG = self.generateData()
class MyEnv(Env):
ENVIRON = self.CONFIG
self.env = MyEnv()
def test_singleton_environ(self):
self.assertTrue(self.CONFIG is self.env.ENVIRON)
class SchemaEnvTests(BaseTests):
def test_schema(self):
env = Env(INT_VAR=int, NOT_PRESENT_VAR=(float, 33.3), STR_VAR=str,
INT_LIST=[int], DEFAULT_LIST=([int], [2]))
self.assertTypeAndValue(int, 42, env('INT_VAR'))
self.assertTypeAndValue(float, 33.3, env('NOT_PRESENT_VAR'))
self.assertEqual('bar', env('STR_VAR'))
self.assertEqual('foo', env('NOT_PRESENT2', default='foo'))
self.assertTypeAndValue(list, [42, 33], env('INT_LIST'))
self.assertTypeAndValue(list, [2], env('DEFAULT_LIST'))
# Override schema in this one case
self.assertTypeAndValue(str, '42', env('INT_VAR', cast=str))
class DatabaseTestSuite(unittest.TestCase):
def test_postgres_parsing(self):
url = 'postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], DJANGO_POSTGRES)
self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
self.assertEqual(url['PORT'], 5431)
def test_postgres_parsing_unix_domain_socket(self):
url = 'postgres:////var/run/postgresql/db'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], DJANGO_POSTGRES)
self.assertEqual(url['NAME'], 'db')
self.assertEqual(url['HOST'], '/var/run/postgresql')
def test_postgis_parsing(self):
url = 'postgis://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.contrib.gis.db.backends.postgis')
self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
self.assertEqual(url['PORT'], 5431)
def test_mysql_gis_parsing(self):
url = 'mysqlgis://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.contrib.gis.db.backends.mysql')
self.assertEqual(url['NAME'], 'd8r82722r2kuvn')
self.assertEqual(url['HOST'], 'ec2-107-21-253-135.compute-1.amazonaws.com')
self.assertEqual(url['USER'], 'uf07k1i6d8ia0v')
self.assertEqual(url['PASSWORD'], 'wegauwhgeuioweg')
self.assertEqual(url['PORT'], 5431)
def test_cleardb_parsing(self):
url = 'mysql://bea6eb025ca0d8:69772142@us-cdbr-east.cleardb.com/heroku_97681db3eff7580?reconnect=true'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(url['NAME'], 'heroku_97681db3eff7580')
self.assertEqual(url['HOST'], 'us-cdbr-east.cleardb.com')
self.assertEqual(url['USER'], 'bea6eb025ca0d8')
self.assertEqual(url['PASSWORD'], '69772142')
self.assertEqual(url['PORT'], '')
def test_mysql_no_password(self):
url = 'mysql://travis@localhost/test_db'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.mysql')
self.assertEqual(url['NAME'], 'test_db')
self.assertEqual(url['HOST'], 'localhost')
self.assertEqual(url['USER'], 'travis')
self.assertEqual(url['PASSWORD'], '')
self.assertEqual(url['PORT'], '')
def test_empty_sqlite_url(self):
url = 'sqlite://'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(url['NAME'], ':memory:')
def test_memory_sqlite_url(self):
url = 'sqlite://:memory:'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(url['NAME'], ':memory:')
def test_memory_sqlite_url_warns_about_netloc(self):
url = 'sqlite://missing-slash-path'
with warnings.catch_warnings(record=True) as w:
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'django.db.backends.sqlite3')
self.assertEqual(url['NAME'], ':memory:')
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[0].category, UserWarning))
def test_database_options_parsing(self):
url = 'postgres://user:pass@host:1234/dbname?conn_max_age=600'
url = Env.db_url_config(url)
self.assertEqual(url['CONN_MAX_AGE'], 600)
url = 'postgres://user:pass@host:1234/dbname?conn_max_age=None&autocommit=True&atomic_requests=False'
url = Env.db_url_config(url)
self.assertEqual(url['CONN_MAX_AGE'], None)
self.assertEqual(url['AUTOCOMMIT'], True)
self.assertEqual(url['ATOMIC_REQUESTS'], False)
url = 'mysql://user:pass@host:1234/dbname?init_command=SET storage_engine=INNODB'
url = Env.db_url_config(url)
self.assertEqual(url['OPTIONS'], {
'init_command': 'SET storage_engine=INNODB',
})
def test_database_ldap_url(self):
url = 'ldap://cn=admin,dc=nodomain,dc=org:some_secret_password@ldap.nodomain.org/'
url = Env.db_url_config(url)
self.assertEqual(url['ENGINE'], 'ldapdb.backends.ldap')
self.assertEqual(url['HOST'], 'ldap.nodomain.org')
self.assertEqual(url['PORT'], '')
self.assertEqual(url['NAME'], 'ldap://ldap.nodomain.org')
self.assertEqual(url['USER'], 'cn=admin,dc=nodomain,dc=org')
self.assertEqual(url['PASSWORD'], 'some_secret_password')
class CacheTestSuite(unittest.TestCase):
def test_base_options_parsing(self):
url = 'memcache://127.0.0.1:11211/?timeout=0&key_prefix=cache_&key_function=foo.get_key&version=1'
url = Env.cache_url_config(url)
self.assertEqual(url['KEY_PREFIX'], 'cache_')
self.assertEqual(url['KEY_FUNCTION'], 'foo.get_key')
self.assertEqual(url['TIMEOUT'], 0)
self.assertEqual(url['VERSION'], 1)
url = 'redis://127.0.0.1:6379/?timeout=None'
url = Env.cache_url_config(url)
self.assertEqual(url['TIMEOUT'], None)
def test_memcache_parsing(self):
url = 'memcache://127.0.0.1:11211'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(url['LOCATION'], '127.0.0.1:11211')
def test_memcache_pylib_parsing(self):
url = 'pymemcache://127.0.0.1:11211'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.PyLibMCCache')
self.assertEqual(url['LOCATION'], '127.0.0.1:11211')
def test_memcache_multiple_parsing(self):
url = 'memcache://172.19.26.240:11211,172.19.26.242:11212'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(url['LOCATION'], ['172.19.26.240:11211', '172.19.26.242:11212'])
def test_memcache_socket_parsing(self):
url = 'memcache:///tmp/memcached.sock'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.memcached.MemcachedCache')
self.assertEqual(url['LOCATION'], 'unix:/tmp/memcached.sock')
def test_dbcache_parsing(self):
url = 'dbcache://my_cache_table'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.db.DatabaseCache')
self.assertEqual(url['LOCATION'], 'my_cache_table')
def test_filecache_parsing(self):
url = 'filecache:///var/tmp/django_cache'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
self.assertEqual(url['LOCATION'], '/var/tmp/django_cache')
def test_filecache_windows_parsing(self):
url = 'filecache://C:/foo/bar'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
self.assertEqual(url['LOCATION'], 'C:/foo/bar')
def test_locmem_parsing(self):
url = 'locmemcache://'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.locmem.LocMemCache')
self.assertEqual(url['LOCATION'], '')
def test_locmem_named_parsing(self):
url = 'locmemcache://unique-snowflake'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.locmem.LocMemCache')
self.assertEqual(url['LOCATION'], 'unique-snowflake')
def test_dummycache_parsing(self):
url = 'dummycache://'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.dummy.DummyCache')
self.assertEqual(url['LOCATION'], '')
def test_redis_parsing(self):
url = 'rediscache://127.0.0.1:6379/1?client_class=django_redis.client.DefaultClient&password=secret'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], REDIS_DRIVER)
self.assertEqual(url['LOCATION'], 'redis://127.0.0.1:6379/1')
self.assertEqual(url['OPTIONS'], {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PASSWORD': 'secret',
})
def test_redis_socket_parsing(self):
url = 'rediscache:///path/to/socket:1'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django_redis.cache.RedisCache')
self.assertEqual(url['LOCATION'], 'unix:///path/to/socket:1')
def test_redis_with_password_parsing(self):
url = 'rediscache://:redispass@127.0.0.1:6379/0'
url = Env.cache_url_config(url)
self.assertEqual(REDIS_DRIVER, url['BACKEND'])
self.assertEqual(url['LOCATION'], 'redis://:redispass@127.0.0.1:6379/0')
def test_redis_multi_location_parsing(self):
url = 'rediscache://host1:6379,host2:6379,host3:9999/1'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], REDIS_DRIVER)
self.assertEqual(url['LOCATION'], [
'redis://host1:6379/1',
'redis://host2:6379/1',
'redis://host3:9999/1',
])
def test_redis_socket_url(self):
url = 'redis://:redispass@/path/to/socket.sock?db=0'
url = Env.cache_url_config(url)
self.assertEqual(REDIS_DRIVER, url['BACKEND'])
self.assertEqual(url['LOCATION'], 'unix://:redispass@/path/to/socket.sock')
self.assertEqual(url['OPTIONS'], {
'DB': 0
})
def test_options_parsing(self):
url = 'filecache:///var/tmp/django_cache?timeout=60&max_entries=1000&cull_frequency=0'
url = Env.cache_url_config(url)
self.assertEqual(url['BACKEND'], 'django.core.cache.backends.filebased.FileBasedCache')
self.assertEqual(url['LOCATION'], '/var/tmp/django_cache')
self.assertEqual(url['TIMEOUT'], 60)
self.assertEqual(url['OPTIONS'], {
'MAX_ENTRIES': 1000,
'CULL_FREQUENCY': 0,
})
def test_custom_backend(self):
url = 'memcache://127.0.0.1:5400?foo=option&bars=9001'
backend = 'django_redis.cache.RedisCache'
url = Env.cache_url_config(url, backend)
self.assertEqual(url['BACKEND'], backend)
self.assertEqual(url['LOCATION'], '127.0.0.1:5400')
self.assertEqual(url['OPTIONS'], {
'FOO': 'option',
'BARS': 9001,
})
class SearchTestSuite(unittest.TestCase):
solr_url = 'solr://127.0.0.1:8983/solr'
elasticsearch_url = 'elasticsearch://127.0.0.1:9200/index'
whoosh_url = 'whoosh:///home/search/whoosh_index'
xapian_url = 'xapian:///home/search/xapian_index'
simple_url = 'simple:///'
def test_solr_parsing(self):
url = Env.search_url_config(self.solr_url)
self.assertEqual(url['ENGINE'], 'haystack.backends.solr_backend.SolrEngine')
self.assertEqual(url['URL'], 'http://127.0.0.1:8983/solr')
def test_solr_multicore_parsing(self):
timeout = 360
index = 'solr_index'
url = '%s/%s?TIMEOUT=%s' % (self.solr_url, index, timeout)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.solr_backend.SolrEngine')
self.assertEqual(url['URL'], 'http://127.0.0.1:8983/solr/solr_index')
self.assertEqual(url['TIMEOUT'], timeout)
self.assertTrue('INDEX_NAME' not in url)
self.assertTrue('PATH' not in url)
def test_elasticsearch_parsing(self):
timeout = 360
url = '%s?TIMEOUT=%s' % (self.elasticsearch_url, timeout)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine')
self.assertTrue('INDEX_NAME' in url.keys())
self.assertEqual(url['INDEX_NAME'], 'index')
self.assertTrue('TIMEOUT' in url.keys())
self.assertEqual(url['TIMEOUT'], timeout)
self.assertTrue('PATH' not in url)
def test_whoosh_parsing(self):
storage = 'file' # or ram
post_limit = 128 * 1024 * 1024
url = '%s?STORAGE=%s&POST_LIMIT=%s' % (self.whoosh_url, storage, post_limit)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.whoosh_backend.WhooshEngine')
self.assertTrue('PATH' in url.keys())
self.assertEqual(url['PATH'], '/home/search/whoosh_index')
self.assertTrue('STORAGE' in url.keys())
self.assertEqual(url['STORAGE'], storage)
self.assertTrue('POST_LIMIT' in url.keys())
self.assertEqual(url['POST_LIMIT'], post_limit)
self.assertTrue('INDEX_NAME' not in url)
def test_xapian_parsing(self):
flags = 'myflags'
url = '%s?FLAGS=%s' % (self.xapian_url, flags)
url = Env.search_url_config(url)
self.assertEqual(url['ENGINE'], 'haystack.backends.xapian_backend.XapianEngine')
self.assertTrue('PATH' in url.keys())
self.assertEqual(url['PATH'], '/home/search/xapian_index')
self.assertTrue('FLAGS' in url.keys())
self.assertEqual(url['FLAGS'], flags)
self.assertTrue('INDEX_NAME' not in url)
def test_simple_parsing(self):
url = Env.search_url_config(self.simple_url)
self.assertEqual(url['ENGINE'], 'haystack.backends.simple_backend.SimpleEngine')
self.assertTrue('INDEX_NAME' not in url)
self.assertTrue('PATH' not in url)
def test_common_args_parsing(self):
excluded_indexes = 'myapp.indexes.A,myapp.indexes.B'
include_spelling = 1
batch_size = 100
params = 'EXCLUDED_INDEXES=%s&INCLUDE_SPELLING=%s&BATCH_SIZE=%s' % (
excluded_indexes,
include_spelling,
batch_size
)
for url in [
self.solr_url,
self.elasticsearch_url,
self.whoosh_url,
self.xapian_url,
self.simple_url,
]:
url = '?'.join([url, params])
url = Env.search_url_config(url)
self.assertTrue('EXCLUDED_INDEXES' in url.keys())
self.assertTrue('myapp.indexes.A' in url['EXCLUDED_INDEXES'])
self.assertTrue('myapp.indexes.B' in url['EXCLUDED_INDEXES'])
            self.assertTrue('INCLUDE_SPELLING' in url.keys())
self.assertTrue(url['INCLUDE_SPELLING'])
self.assertTrue('BATCH_SIZE' in url.keys())
self.assertEqual(url['BATCH_SIZE'], 100)
class EmailTests(unittest.TestCase):
def test_smtp_parsing(self):
url = 'smtps://user@domain.com:password@smtp.example.com:587'
url = Env.email_url_config(url)
self.assertEqual(url['EMAIL_BACKEND'], 'django.core.mail.backends.smtp.EmailBackend')
self.assertEqual(url['EMAIL_HOST'], 'smtp.example.com')
self.assertEqual(url['EMAIL_HOST_PASSWORD'], 'password')
self.assertEqual(url['EMAIL_HOST_USER'], 'user@domain.com')
self.assertEqual(url['EMAIL_PORT'], 587)
self.assertEqual(url['EMAIL_USE_TLS'], True)
class PathTests(unittest.TestCase):
def test_path_class(self):
root = Path(__file__, '..', is_file=True)
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
self.assertEqual(root(), root_path)
self.assertEqual(root.__root__, root_path)
web = root.path('public')
self.assertEqual(web(), os.path.join(root_path, 'public'))
self.assertEqual(web('css'), os.path.join(root_path, 'public', 'css'))
def test_required_path(self):
self.assertRaises(ImproperlyConfigured, Path, '/not/existing/path/', required=True)
self.assertRaises(ImproperlyConfigured, Path(__file__), 'not_existing_path', required=True)
def test_comparison(self):
self.assertTrue(Path('/home') in Path('/'))
self.assertTrue(Path('/home') not in Path('/other/dir'))
self.assertTrue(Path('/home') == Path('/home'))
self.assertTrue(Path('/home') != Path('/home/dev'))
self.assertEqual(Path('/home/foo/').rfind('/'), str(Path('/home/foo')).rfind('/'))
self.assertEqual(Path('/home/foo/').find('/home'), str(Path('/home/foo/')).find('/home'))
self.assertEqual(Path('/home/foo/')[1], str(Path('/home/foo/'))[1])
self.assertEqual(Path('/home/foo/').__fspath__(), str(Path('/home/foo/')))
self.assertEqual(~Path('/home'), Path('/'))
self.assertEqual(Path('/') + 'home', Path('/home'))
self.assertEqual(Path('/') + '/home/public', Path('/home/public'))
self.assertEqual(Path('/home/dev/public') - 2, Path('/home'))
self.assertEqual(Path('/home/dev/public') - 'public', Path('/home/dev'))
self.assertRaises(TypeError, lambda _: Path('/home/dev/') - 'not int')
def load_suite():
test_suite = unittest.TestSuite()
cases = [
EnvTests, FileEnvTests, SubClassTests, SchemaEnvTests, PathTests,
DatabaseTestSuite, CacheTestSuite, EmailTests, SearchTestSuite
]
for case in cases:
test_suite.addTest(unittest.makeSuite(case))
return test_suite
if __name__ == "__main__":
try:
if sys.argv[1] == '-o':
for key, value in BaseTests.generateData().items():
print("{0}={1}".format(key, value))
sys.exit()
except IndexError:
pass
unittest.TextTestRunner().run(load_suite())
| 42.618863
| 124
| 0.643951
|
10bfd093ed1b24a818b32e00a3605d320b1cbe6c
| 2,311
|
py
|
Python
|
app/core/tests/test_models.py
|
Pkawa/DRF-portfolio-project
|
a9029957ca551eeabc442d09195c603fcf7681fb
|
[
"MIT"
] | 1
|
2021-07-07T00:27:35.000Z
|
2021-07-07T00:27:35.000Z
|
app/core/tests/test_models.py
|
Pkawa/DRF-portfolio-project
|
a9029957ca551eeabc442d09195c603fcf7681fb
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
Pkawa/DRF-portfolio-project
|
a9029957ca551eeabc442d09195c603fcf7681fb
|
[
"MIT"
] | 1
|
2020-05-21T17:42:27.000Z
|
2020-05-21T17:42:27.000Z
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='someexampleemail@test.com', password='examplepassword'):
"""Create a sample user."""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = 'someexampleemail@test.com'
password = 'examplepassword'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'someexampleemail@TEST.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'test@superuser.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""Test the tag string representation"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Cycling'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""Test the ingredient string representation."""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Avocado'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Guacamole',
time_minutes=10
)
self.assertEqual(str(recipe), recipe.title)
| 30.407895
| 79
| 0.640848
|
42e1cc0d06051662de9ec171582c4ee74dc24764
| 19,705
|
py
|
Python
|
python/quadruped_reactive_walking/crocoddyl_class/MPC_crocoddyl_planner.py
|
nim65s/quadruped-reactive-walking
|
1e0f4069fd11af85abf10bfc8f9d66200c672646
|
[
"BSD-2-Clause"
] | 6
|
2021-03-03T10:59:19.000Z
|
2022-03-13T15:05:25.000Z
|
python/quadruped_reactive_walking/crocoddyl_class/MPC_crocoddyl_planner.py
|
nim65s/quadruped-reactive-walking
|
1e0f4069fd11af85abf10bfc8f9d66200c672646
|
[
"BSD-2-Clause"
] | 1
|
2021-04-19T14:02:57.000Z
|
2021-04-19T14:02:57.000Z
|
python/quadruped_reactive_walking/crocoddyl_class/MPC_crocoddyl_planner.py
|
nim65s/quadruped-reactive-walking
|
1e0f4069fd11af85abf10bfc8f9d66200c672646
|
[
"BSD-2-Clause"
] | 3
|
2021-03-05T08:12:05.000Z
|
2021-09-23T15:35:28.000Z
|
# coding: utf8
import sys
import os
from sys import argv
sys.path.insert(0, os.getcwd()) # adds current directory to python path
import crocoddyl
import numpy as np
import quadruped_walkgen
import utils
import pinocchio as pin
class MPC_crocoddyl_planner():
"""Wrapper class for the MPC problem to call the ddp solver and
retrieve the results.
Args:
dt (float): time step of the MPC
T_mpc (float): Duration of the prediction horizon
mu (float): Friction coefficient
inner(bool): Inside or outside approximation of the friction cone
"""
def __init__(self, dt = 0.02 , T_mpc = 0.32 , mu = 1, inner = True , warm_start = False , min_fz = 0.0 , n_periods = 1):
# Time step of the solver
self.dt = dt
# Period of the MPC
self.T_mpc = T_mpc
        # Number of periods: not used yet
self.n_periods = n_periods
# Mass of the robot
self.mass = 2.50000279
# Inertia matrix of the robot in body frame
# self.gI = np.diag([0.00578574, 0.01938108, 0.02476124])
self.gI = np.array([[3.09249e-2, -8.00101e-7, 1.865287e-5],
[-8.00101e-7, 5.106100e-2, 1.245813e-4],
[1.865287e-5, 1.245813e-4, 6.939757e-2]])
# Friction coefficient
if inner :
self.mu = (1/np.sqrt(2))*mu
else:
self.mu = mu
# Weights Vector : States
# self.stateWeights = np.array([1,1,150,35,30,8,20,20,15,4,4,8])
# Weights Vector : States
self.w_x = 0.3
self.w_y = 0.3
self.w_z = 2
self.w_roll = 0.9
self.w_pitch = 1.
self.w_yaw = 0.4
self.w_vx = 1.5*np.sqrt(self.w_x)
self.w_vy = 2*np.sqrt(self.w_y)
self.w_vz = 1*np.sqrt(self.w_z)
self.w_vroll = 0.05*np.sqrt(self.w_roll)
self.w_vpitch = 0.07*np.sqrt(self.w_pitch)
self.w_vyaw = 0.05*np.sqrt(self.w_yaw)
self.stateWeights = np.array([self.w_x, self.w_y, self.w_z, self.w_roll, self.w_pitch, self.w_yaw,
self.w_vx, self.w_vy, self.w_vz, self.w_vroll, self.w_vpitch, self.w_vyaw])
# Weight Vector : Force Norm
# self.forceWeights = np.full(12,0.02)
self.forceWeights = np.array(4*[0.01,0.01,0.01])
# Weight Vector : Friction cone cost
# self.frictionWeights = 10
self.frictionWeights = 0.5
# Max iteration ddp solver
self.max_iteration = 10
# Warm Start for the solver
self.warm_start = warm_start
# Minimum normal force (N)
self.min_fz = min_fz
# Gait matrix
self.gait = np.zeros((20, 5))
self.gait_old = np.zeros((20, 5))
self.index = 0
# Position of the feet in local frame
self.fsteps = np.full((20, 13), 0.0)
# List of the actionModel
self.ListAction = []
# Warm start
self.x_init = []
self.u_init = []
# Weights on the shoulder term : term 1
self.shoulderWeights = np.array(4*[0.3,0.4])
# symmetry & centrifugal term in foot position heuristic
self.centrifugal_term = True
self.symmetry_term = True
# Weight on the step command
self.stepWeights = np.full(4,0.8)
# Weights on the previous position predicted : term 2
self.lastPositionWeights = np.full(8,2.)
        # When the foot reaches 10% of the flying phase, the optimisation of the foot
        # positions stops by setting the "lastPositionWeight" on.
        # For example, if T_mpc = 0.32s, dt = 0.02s, one flying phase period lasts 7 nodes.
# When there are 6 knots left before changing steps, the next knot will have its relative weight activated
self.stop_optim = 0.1
self.index_stop = int((1 - self.stop_optim)*(int(0.5*self.T_mpc/self.dt) - 1))
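        # With the example above (T_mpc = 0.32 s, dt = 0.02 s): int(0.5 * 0.32 / 0.02) - 1 = 7
        # flying-phase knots, so index_stop = int(0.9 * 7) = 6, matching the "6 knots left" comment.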
# Index of the control cycle to start the "stopping optimisation"
self.start_stop_optim = 20
# Predicted position of feet computed by previous cycle, it will be used with
# the self.lastPositionWeights weight.
self.oMl = pin.SE3.Identity() # transform from world to local frame ("L")
self.l_fsteps = np.zeros((3,4))
self.o_fsteps = np.zeros((3,4))
# Shooting problem
self.problem = None
# ddp solver
self.ddp = None
# Xs results without the actionStepModel
self.Xs = np.zeros((20,int(T_mpc/dt)*n_periods))
# self.Us = np.zeros((12,int(T_mpc/dt)))
# Initial foot location (local frame, X,Y plan)
self.p0 = [ 0.1946,0.15005, 0.1946,-0.15005, -0.1946, 0.15005 ,-0.1946, -0.15005]
def solve(self, k, xref , l_feet , oMl = pin.SE3.Identity()):
""" Solve the MPC problem
Args:
k : Iteration
xref : the desired state vector
l_feet : current position of the feet
"""
# Update the dynamic depending on the predicted feet position
self.updateProblem( k , xref , l_feet , oMl)
# Solve problem
self.ddp.solve(self.x_init,self.u_init, self.max_iteration)
# Get the results
self.get_fsteps()
return 0
def updateProblem(self,k,xref , l_feet , oMl = pin.SE3.Identity()):
"""Update the dynamic of the model list according to the predicted position of the feet,
and the desired state.
Args:
"""
self.oMl = oMl
# position of foot predicted by previous gait cycle in world frame
for i in range(4):
self.l_fsteps[:,i] = self.oMl.inverse() * self.o_fsteps[:,i]
if k > 0:
# Move one step further in the gait
# Add and remove step model in the list of model
self.roll()
# Update initial state of the problem
if np.sum(self.gait[0,1:]) == 4 :
# 4 contact --> need previous control cycle to know which foot was on the ground
# On swing phase before --> initialised below shoulder
p0 = np.repeat(np.array([1,1,1,1])-self.gait_old[0,1:],2)*self.p0
# On the ground before --> initialised with the current feet position
p0 += np.repeat(self.gait_old[0,1:],2)*l_feet[0:2,:].reshape(8, order = 'F')
else :
# On swing phase before --> initialised below shoulder
p0 = np.repeat(np.array([1,1,1,1])-self.gait[0,1:],2)*self.p0
# On the ground before --> initialised with the current feet position
p0 += np.repeat(self.gait[0,1:],2)*l_feet[0:2,:].reshape(8, order = 'F')
else :
# Create gait matrix
self.create_walking_trot()
self.gait_old = self.gait
# First step : create the list of model
self.create_List_model()
            # According to the current footstep planner, the walk starts on the next phase
self.roll()
# Update initial state of the problem with the shoulder position
p0 = self.p0
j = 0
k_cum = 0
L = []
# Iterate over all phases of the gait
        # The first column of xref corresponds to the current state
        # Gap introduced to take into account the Step model (more nodes than gait phases)
self.x_init = []
self.u_init = []
gap = 0
while (self.gait[j, 0] != 0):
for i in range(k_cum, k_cum+np.int(self.gait[j, 0])):
if self.ListAction[i].__class__.__name__ == "ActionModelQuadrupedStep" :
self.x_init.append(np.zeros(20))
self.u_init.append(np.zeros(4))
if i == 0 :
self.ListAction[i].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap] , self.gait[0, 1:] - self.gait_old[0, 1:])
else :
self.ListAction[i].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap] , self.gait[j, 1:] - self.gait[j-1, 1:])
self.ListAction[i+1].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap] , self.gait[j, 1:])
self.x_init.append(np.zeros(20))
self.u_init.append(np.zeros(12))
k_cum += 1
gap -= 1
# self.ListAction[i+1].shoulderWeights = 2*np.array(4*[0.25,0.3])
else :
self.ListAction[i].updateModel(np.reshape(self.l_fsteps, (3, 4), order='F') , xref[:, i+gap] , self.gait[j, 1:])
self.x_init.append(np.zeros(20))
self.u_init.append(np.zeros(12))
k_cum += np.int(self.gait[j, 0])
j += 1
if k > self.start_stop_optim :
# Update the lastPositionweight
self.updatePositionWeights()
# # Update model of the terminal model
self.terminalModel.updateModel(np.reshape(self.fsteps[j-1, 1:], (3, 4), order='F') , xref[:,-1] , self.gait[j-1, 1:])
self.x_init.append(np.zeros(20))
# Shooting problem
self.problem = crocoddyl.ShootingProblem(np.zeros(20), self.ListAction, self.terminalModel)
self.problem.x0 = np.concatenate([xref[:,0] , p0 ])
# DDP Solver
self.ddp = crocoddyl.SolverDDP(self.problem)
return 0
def get_latest_result(self):
"""Return the desired contact forces that have been computed by the last iteration of the MPC
Args:
"""
if self.ListAction[0].__class__.__name__ == "ActionModelQuadrupedStep" :
return np.repeat(self.gait[0,1:] , 3)*np.reshape(np.asarray(self.ddp.us[1]) , (12,))
else :
return np.repeat(self.gait[0,1:] , 3)*np.reshape(np.asarray(self.ddp.us[0]) , (12,))
def update_model_augmented(self , model):
        '''Set internal parameters for the augmented model type
'''
# Model parameters
model.dt = self.dt
model.mass = self.mass
model.gI = self.gI
model.mu = self.mu
model.min_fz = self.min_fz
# Weights vectors
model.stateWeights = self.stateWeights
model.forceWeights = self.forceWeights
model.frictionWeights = self.frictionWeights
# Weight on feet position
# will be set when needed
model.lastPositionWeights = np.full(8,0.0)
model.shoulderWeights = self.shoulderWeights
model.symmetry_term = self.symmetry_term
model.centrifugal_term = self.centrifugal_term
return 0
def update_model_step(self , model):
"""Set intern parameters for step model type
"""
model.shoulderWeights = self.shoulderWeights
model.stateWeights = self.stateWeights
model.stepWeights = self.stepWeights
model.symmetry_term = self.symmetry_term
model.centrifugal_term = self.centrifugal_term
return 0
def create_List_model(self):
"""Create the List model using ActionQuadrupedModel()
        The same model cannot be used [model]*(T_mpc/dt) because the dynamics change at each node
"""
j = 0
k_cum = 0
# Iterate over all phases of the gait
        # The first column of xref corresponds to the current state
while (self.gait[j, 0] != 0):
for i in range(k_cum, k_cum+np.int(self.gait[j, 0])):
model = quadruped_walkgen.ActionModelQuadrupedAugmented()
# Update intern parameters
self.update_model_augmented(model)
# Add model to the list of model
self.ListAction.append(model)
if np.sum(self.gait[j+1, 1:]) == 4 : # No optimisation on the first line
model = quadruped_walkgen.ActionModelQuadrupedStep()
# Update intern parameters
self.update_model_step(model)
# Add model to the list of model
self.ListAction.append(model)
k_cum += np.int(self.gait[j, 0])
j += 1
# Model parameters of terminal node
self.terminalModel = quadruped_walkgen.ActionModelQuadrupedAugmented()
self.update_model_augmented(self.terminalModel)
# Weights vectors of terminal node
self.terminalModel.forceWeights = np.zeros(12)
self.terminalModel.frictionWeights = 0.
self.terminalModel.shoulderWeights = np.full(8,0.0)
self.terminalModel.lastPositionWeights = np.full(8,0.0)
# Shooting problem
self.problem = crocoddyl.ShootingProblem(np.zeros(20), self.ListAction, self.terminalModel)
# DDP Solver
self.ddp = crocoddyl.SolverDDP(self.problem)
return 0
def create_walking_trot(self):
"""Create the matrices used to handle the gait and initialize them to perform a walking trot
        self.gait and self.fsteps matrices contain information about the walking trot
"""
# Number of timesteps in a half period of gait
N = np.int(0.5 * self.T_mpc/self.dt)
# Starting status of the gait
# 4-stance phase, 2-stance phase, 4-stance phase, 2-stance phase
self.gait = np.zeros((self.fsteps.shape[0], 5))
for i in range(self.n_periods):
self.gait[(4*i):(4*(i+1)), 0] = np.array([1, N-1, 1, N-1])
self.fsteps[(4*i):(4*(i+1)), 0] = self.gait[(4*i):(4*(i+1)), 0]
# Set stance and swing phases
            # Coefficient (i, j) is equal to 0.0 if the j-th foot is in swing phase during the i-th phase
            # Coefficient (i, j) is equal to 1.0 if the j-th foot is in stance phase during the i-th phase
self.gait[4*i+0, 1:] = np.ones((4,))
self.gait[4*i+1, [1, 4]] = np.ones((2,))
self.gait[4*i+2, 1:] = np.ones((4,))
self.gait[4*i+3, [2, 3]] = np.ones((2,))
return 0
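        # Worked example (assuming T_mpc = 0.32 s, dt = 0.02 s, n_periods = 1): N = 8 and the
        # first four gait rows are
        #   [1, 1, 1, 1, 1]   -> 4-stance phase, 1 node
        #   [7, 1, 0, 0, 1]   -> 2-stance phase (feet 1 and 4), N - 1 = 7 nodes
        #   [1, 1, 1, 1, 1]   -> 4-stance phase, 1 node
        #   [7, 0, 1, 1, 0]   -> 2-stance phase (feet 2 and 3), N - 1 = 7 nodes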
def roll(self):
"""Move one step further in the gait cycle
        Decrease by 1 the number of remaining steps for the current phase of the gait and increase
        by 1 the number of remaining steps for the last phase of the gait (periodic motion)
Add and remove corresponding model in ListAction
"""
self.gait_old = self.gait
# Index of the first empty line
index = next((idx for idx, val in np.ndenumerate(self.gait[:, 0]) if val==0.0), 0.0)[0]
# Create a new phase if needed or increase the last one by 1 step
if np.array_equal(self.gait[0, 1:], self.gait[index-1, 1:]):
self.gait[index-1, 0] += 1.0
else:
self.gait[index, 1:] = self.gait[0, 1:]
self.gait[index, 0] = 1.0
# Remove first model
if self.ListAction[0].__class__.__name__ == "ActionModelQuadrupedStep" :
self.ListAction.pop(0)
model = self.ListAction.pop(0)
# Decrease the current phase by 1 step and delete it if it has ended
if self.gait[0, 0] > 1.0:
self.gait[0, 0] -= 1.0
else:
self.gait = np.roll(self.gait, -1, axis=0)
self.gait[-1, :] = np.zeros((5, ))
# Get new Index of the first empty line
index = next((idx for idx, val in np.ndenumerate(self.gait[:, 0]) if val==0.0), 0.0)[0]
# Add last model & step model if needed
if np.sum(self.gait[index - 1, 1:]) == 4 and self.gait[index - 1, 0 ] != 0:
modelStep = quadruped_walkgen.ActionModelQuadrupedStep()
self.update_model_step(modelStep)
# Add model to the list of model
self.ListAction.append(modelStep)
#reset to 0 the weight lastPosition
model.lastPositionWeights = np.full(8,0.0)
self.ListAction.append(model)
return 0
def get_fsteps(self):
"""Create the matrices fstep, the position of the feet predicted during the control cycle.
To be used after the solve function.
"""
##################################################
# Get command vector without actionModelStep node
##################################################
Us = self.ddp.us
for elt in Us :
if len(elt) == 4 :
Us.remove(elt)
self.Us = np.array(Us)[:,:].transpose()
################################################
# Get state vector without actionModelStep node
################################################
# self.Xs[:,0 ] = np.array(self.ddp.xs[0])
k = 1
gap = 1
for elt in self.ListAction :
if elt.__class__.__name__ != "ActionModelQuadrupedStep" :
self.Xs[:,k - gap ] = np.array(self.ddp.xs[k])
else :
gap += 1
k = k + 1
########################################
# Compute fsteps using the state vector
########################################
j = 0
k_cum = 0
self.fsteps[:,0] = self.gait[:,0]
# Iterate over all phases of the gait
while (self.gait[j, 0] != 0):
for i in range(k_cum, k_cum+np.int(self.gait[j, 0])):
self.fsteps[j ,1: ] = np.repeat(self.gait[j,1:] , 3)*np.concatenate([self.Xs[12:14 , k_cum ],[0.],self.Xs[14:16 , k_cum ],[0.],
self.Xs[16:18 , k_cum ],[0.],self.Xs[18:20 , k_cum ],[0.]])
k_cum += np.int(self.gait[j, 0])
j += 1
####################################################
# Compute the current position of feet in contact
# and the position of desired feet in flying phase
# in local frame
#####################################################
for i in range(4):
index = next((idx for idx, val in np.ndenumerate(self.fsteps[:, 3*i+1]) if ((not (val==0)) and (not np.isnan(val)))), [-1])[0]
pos_tmp = np.reshape(np.array(self.oMl * (np.array([self.fsteps[index, (1+i*3):(4+i*3)]]).transpose())) , (3,1) )
self.o_fsteps[:2, i] = pos_tmp[0:2, 0]
return self.fsteps
def updatePositionWeights(self) :
"""Update the parameters in the ListAction to keep the next foot position at the same position computed by the
previous control cycle and avoid re-optimization at the end of the flying phase
"""
if self.gait[0,0] == self.index_stop :
self.ListAction[int(self.gait[0,0])+ 1].lastPositionWeights = np.repeat((np.array([1,1,1,1]) - self.gait[0,1:]) , 2 )* self.lastPositionWeights
return 0
def get_xrobot(self):
"""Returns the state vectors predicted by the mpc throughout the time horizon, the initial column is deleted as it corresponds
initial state vector
Args:
"""
return np.array(self.ddp.xs)[1:,:].transpose()
def get_fpredicted(self):
"""Returns the force vectors command predicted by the mpc throughout the time horizon,
Args:
"""
return np.array(self.ddp.us)[:,:].transpose()[:,:]
| 37.109228
| 160
| 0.548541
|
455aff1f414f049a16ef79218582c429bcdfc601
| 378
|
py
|
Python
|
answers/vjha21/Day9/question1.py
|
arc03/30-DaysOfCode-March-2021
|
6d6e11bf70280a578113f163352fa4fa8408baf6
|
[
"MIT"
] | 22
|
2021-03-16T14:07:47.000Z
|
2021-08-13T08:52:50.000Z
|
answers/vjha21/Day9/question1.py
|
arc03/30-DaysOfCode-March-2021
|
6d6e11bf70280a578113f163352fa4fa8408baf6
|
[
"MIT"
] | 174
|
2021-03-16T21:16:40.000Z
|
2021-06-12T05:19:51.000Z
|
answers/vjha21/Day9/question1.py
|
arc03/30-DaysOfCode-March-2021
|
6d6e11bf70280a578113f163352fa4fa8408baf6
|
[
"MIT"
] | 135
|
2021-03-16T16:47:12.000Z
|
2021-06-27T14:22:38.000Z
|
## From a given array, return indices of the numbers such that they add up to target.
def check_sum(array, key):
for i in range(0, len(array) - 1):
        for j in range(i + 1, len(array)):
if (array[i] + array[j]) == key:
return [i, j]
if __name__ == "__main__":
nums = [2, 7, 11, 15]
target = 9
print(check_sum(nums, target))
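# A hash-map variant of the same idea, added as an illustrative sketch (not part of the
# original solution): it replaces the quadratic pair check with a single O(n) pass.
def check_sum_hashed(array, key):
    seen = {}  # maps an already-visited value to its index
    for j, value in enumerate(array):
        if key - value in seen:
            return [seen[key - value], j]
        seen[value] = j
    return None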
| 29.076923
| 84
| 0.560847
|
962a3038ab8e6d016cb515e497bbf12ca6792abe
| 21,525
|
py
|
Python
|
asteroid/masknn/recurrent.py
|
53X/asteroid
|
69e82fed49bab84975592ae868aaf6dceb91d6cd
|
[
"MIT"
] | 1
|
2020-12-01T07:02:45.000Z
|
2020-12-01T07:02:45.000Z
|
asteroid/masknn/recurrent.py
|
53X/asteroid
|
69e82fed49bab84975592ae868aaf6dceb91d6cd
|
[
"MIT"
] | null | null | null |
asteroid/masknn/recurrent.py
|
53X/asteroid
|
69e82fed49bab84975592ae868aaf6dceb91d6cd
|
[
"MIT"
] | null | null | null |
import functools
import numpy as np
import torch
from torch import nn
from torch.nn.functional import fold, unfold
from .. import complex_nn
from ..utils import has_arg
from . import activations, norms
from ._dccrn_architectures import DCCRN_ARCHITECTURES
from .base import BaseDCUMaskNet
from .norms import CumLN, GlobLN
class SingleRNN(nn.Module):
"""Module for a RNN block.
Inspired from https://github.com/yluo42/TAC/blob/master/utility/models.py
Licensed under CC BY-NC-SA 3.0 US.
Args:
rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can
also be passed in lowercase letters.
input_size (int): Dimension of the input feature. The input should have
shape [batch, seq_len, input_size].
hidden_size (int): Dimension of the hidden state.
n_layers (int, optional): Number of layers used in RNN. Default is 1.
dropout (float, optional): Dropout ratio. Default is 0.
bidirectional (bool, optional): Whether the RNN layers are
bidirectional. Default is ``False``.
"""
def __init__(
self, rnn_type, input_size, hidden_size, n_layers=1, dropout=0, bidirectional=False
):
super(SingleRNN, self).__init__()
assert rnn_type.upper() in ["RNN", "LSTM", "GRU"]
rnn_type = rnn_type.upper()
self.rnn_type = rnn_type
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = getattr(nn, rnn_type)(
input_size,
hidden_size,
num_layers=n_layers,
dropout=dropout,
batch_first=True,
bidirectional=bool(bidirectional),
)
def forward(self, inp):
""" Input shape [batch, seq, feats] """
self.rnn.flatten_parameters() # Enables faster multi-GPU training.
output = inp
rnn_output, _ = self.rnn(output)
return rnn_output
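# Hedged usage sketch for SingleRNN (the sizes below are arbitrary assumptions):
#   rnn = SingleRNN("LSTM", input_size=64, hidden_size=128, bidirectional=True)
#   out = rnn(torch.randn(8, 100, 64))    # -> [8, 100, 256]; 2 * hidden_size because bidirectional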
class StackedResidualRNN(nn.Module):
"""Stacked RNN with builtin residual connection.
Only supports forward RNNs.
See StackedResidualBiRNN for bidirectional ones.
Args:
rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can
also be passed in lowercase letters.
n_units (int): Number of units in recurrent layers. This will also be
the expected input size.
n_layers (int): Number of recurrent layers.
dropout (float): Dropout value, between 0. and 1. (Default: 0.)
bidirectional (bool): If True, use bidirectional RNN, else
unidirectional. (Default: False)
"""
def __init__(self, rnn_type, n_units, n_layers=4, dropout=0.0, bidirectional=False):
super(StackedResidualRNN, self).__init__()
self.rnn_type = rnn_type
self.n_units = n_units
self.n_layers = n_layers
self.dropout = dropout
assert bidirectional is False, "Bidirectional not supported yet"
self.bidirectional = bidirectional
self.layers = nn.ModuleList()
for _ in range(n_layers):
self.layers.append(
SingleRNN(
rnn_type, input_size=n_units, hidden_size=n_units, bidirectional=bidirectional
)
)
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, x):
"""Builtin residual connections + dropout applied before residual.
Input shape : [batch, time_axis, feat_axis]
"""
for rnn in self.layers:
rnn_out = rnn(x)
dropped_out = self.dropout_layer(rnn_out)
x = x + dropped_out
return x
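# Hedged usage sketch for StackedResidualRNN (sizes are assumptions): the residual sum requires the
# RNN output width to match the input width, hence hidden size == n_units.
#   net = StackedResidualRNN("GRU", n_units=64, n_layers=4, dropout=0.1)
#   y = net(torch.randn(8, 100, 64))      # output keeps the input shape [8, 100, 64]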
class StackedResidualBiRNN(nn.Module):
"""Stacked Bidirectional RNN with builtin residual connection.
Residual connections are applied on both RNN directions.
    Only supports bidirectional RNNs.
See StackedResidualRNN for unidirectional ones.
Args:
rnn_type (str): Select from ``'RNN'``, ``'LSTM'``, ``'GRU'``. Can
also be passed in lowercase letters.
n_units (int): Number of units in recurrent layers. This will also be
the expected input size.
n_layers (int): Number of recurrent layers.
dropout (float): Dropout value, between 0. and 1. (Default: 0.)
bidirectional (bool): If True, use bidirectional RNN, else
unidirectional. (Default: False)
"""
def __init__(self, rnn_type, n_units, n_layers=4, dropout=0.0, bidirectional=True):
super().__init__()
self.rnn_type = rnn_type
self.n_units = n_units
self.n_layers = n_layers
self.dropout = dropout
        assert bidirectional is True, "Only bidirectional is supported for now"
self.bidirectional = bidirectional
# The first layer has as many units as input size
self.first_layer = SingleRNN(
rnn_type, input_size=n_units, hidden_size=n_units, bidirectional=bidirectional
)
# As the first layer outputs 2*n_units, the following layers need
# 2*n_units as input size
self.layers = nn.ModuleList()
for i in range(n_layers - 1):
input_size = 2 * n_units
self.layers.append(
SingleRNN(
rnn_type,
input_size=input_size,
hidden_size=n_units,
bidirectional=bidirectional,
)
)
self.dropout_layer = nn.Dropout(self.dropout)
def forward(self, x):
"""Builtin residual connections + dropout applied before residual.
Input shape : [batch, time_axis, feat_axis]
"""
# First layer
rnn_out = self.first_layer(x)
dropped_out = self.dropout_layer(rnn_out)
x = torch.cat([x, x], dim=-1) + dropped_out
# Rest of the layers
for rnn in self.layers:
rnn_out = rnn(x)
dropped_out = self.dropout_layer(rnn_out)
x = x + dropped_out
return x
class DPRNNBlock(nn.Module):
"""Dual-Path RNN Block as proposed in [1].
Args:
in_chan (int): Number of input channels.
hid_size (int): Number of hidden neurons in the RNNs.
norm_type (str, optional): Type of normalization to use. To choose from
- ``'gLN'``: global Layernorm
- ``'cLN'``: channelwise Layernorm
bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN.
rnn_type (str, optional): Type of RNN used. Choose from ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
num_layers (int, optional): Number of layers used in each RNN.
dropout (float, optional): Dropout ratio. Must be in [0, 1].
References:
[1] "Dual-path RNN: efficient long sequence modeling for
time-domain single-channel speech separation", Yi Luo, Zhuo Chen
and Takuya Yoshioka. https://arxiv.org/abs/1910.06379
"""
def __init__(
self,
in_chan,
hid_size,
norm_type="gLN",
bidirectional=True,
rnn_type="LSTM",
num_layers=1,
dropout=0,
):
super(DPRNNBlock, self).__init__()
# IntraRNN and linear projection layer (always bi-directional)
self.intra_RNN = SingleRNN(
rnn_type, in_chan, hid_size, num_layers, dropout=dropout, bidirectional=True
)
self.intra_linear = nn.Linear(hid_size * 2, in_chan)
self.intra_norm = norms.get(norm_type)(in_chan)
# InterRNN block and linear projection layer (uni or bi-directional)
self.inter_RNN = SingleRNN(
rnn_type, in_chan, hid_size, num_layers, dropout=dropout, bidirectional=bidirectional
)
num_direction = int(bidirectional) + 1
self.inter_linear = nn.Linear(hid_size * num_direction, in_chan)
self.inter_norm = norms.get(norm_type)(in_chan)
def forward(self, x):
""" Input shape : [batch, feats, chunk_size, num_chunks] """
B, N, K, L = x.size()
output = x # for skip connection
# Intra-chunk processing
x = x.transpose(1, -1).reshape(B * L, K, N)
x = self.intra_RNN(x)
x = self.intra_linear(x)
x = x.reshape(B, L, K, N).transpose(1, -1)
x = self.intra_norm(x)
output = output + x
# Inter-chunk processing
x = output.transpose(1, 2).transpose(2, -1).reshape(B * K, L, N)
x = self.inter_RNN(x)
x = self.inter_linear(x)
x = x.reshape(B, K, L, N).transpose(1, -1).transpose(2, -1)
x = self.inter_norm(x)
return output + x
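# Hedged usage sketch for DPRNNBlock (shapes are assumptions following the forward() docstring):
#   block = DPRNNBlock(in_chan=64, hid_size=128)
#   out = block(torch.randn(4, 64, 100, 30))   # [batch, feats, chunk_size, num_chunks] -> same shape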
class DPRNN(nn.Module):
"""Dual-path RNN Network for Single-Channel Source Separation
introduced in [1].
Args:
in_chan (int): Number of input filters.
n_src (int): Number of masks to estimate.
out_chan (int or None): Number of bins in the estimated masks.
Defaults to `in_chan`.
bn_chan (int): Number of channels after the bottleneck.
Defaults to 128.
hid_size (int): Number of neurons in the RNNs cell state.
Defaults to 128.
chunk_size (int): window size of overlap and add processing.
Defaults to 100.
hop_size (int or None): hop size (stride) of overlap and add processing.
Default to `chunk_size // 2` (50% overlap).
n_repeats (int): Number of repeats. Defaults to 6.
norm_type (str, optional): Type of normalization to use. To choose from
- ``'gLN'``: global Layernorm
- ``'cLN'``: channelwise Layernorm
mask_act (str, optional): Which non-linear function to generate mask.
bidirectional (bool, optional): True for bidirectional Inter-Chunk RNN
(Intra-Chunk is always bidirectional).
rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
num_layers (int, optional): Number of layers in each RNN.
dropout (float, optional): Dropout ratio, must be in [0,1].
References:
[1] "Dual-path RNN: efficient long sequence modeling for
time-domain single-channel speech separation", Yi Luo, Zhuo Chen
and Takuya Yoshioka. https://arxiv.org/abs/1910.06379
"""
def __init__(
self,
in_chan,
n_src,
out_chan=None,
bn_chan=128,
hid_size=128,
chunk_size=100,
hop_size=None,
n_repeats=6,
norm_type="gLN",
mask_act="relu",
bidirectional=True,
rnn_type="LSTM",
num_layers=1,
dropout=0,
):
super(DPRNN, self).__init__()
self.in_chan = in_chan
out_chan = out_chan if out_chan is not None else in_chan
self.out_chan = out_chan
self.bn_chan = bn_chan
self.hid_size = hid_size
self.chunk_size = chunk_size
hop_size = hop_size if hop_size is not None else chunk_size // 2
self.hop_size = hop_size
self.n_repeats = n_repeats
self.n_src = n_src
self.norm_type = norm_type
self.mask_act = mask_act
self.bidirectional = bidirectional
self.rnn_type = rnn_type
self.num_layers = num_layers
self.dropout = dropout
layer_norm = norms.get(norm_type)(in_chan)
bottleneck_conv = nn.Conv1d(in_chan, bn_chan, 1)
self.bottleneck = nn.Sequential(layer_norm, bottleneck_conv)
# Succession of DPRNNBlocks.
net = []
for x in range(self.n_repeats):
net += [
DPRNNBlock(
bn_chan,
hid_size,
norm_type=norm_type,
bidirectional=bidirectional,
rnn_type=rnn_type,
num_layers=num_layers,
dropout=dropout,
)
]
self.net = nn.Sequential(*net)
# Masking in 3D space
net_out_conv = nn.Conv2d(bn_chan, n_src * bn_chan, 1)
self.first_out = nn.Sequential(nn.PReLU(), net_out_conv)
# Gating and masking in 2D space (after fold)
self.net_out = nn.Sequential(nn.Conv1d(bn_chan, bn_chan, 1), nn.Tanh())
self.net_gate = nn.Sequential(nn.Conv1d(bn_chan, bn_chan, 1), nn.Sigmoid())
self.mask_net = nn.Conv1d(bn_chan, out_chan, 1, bias=False)
# Get activation function.
mask_nl_class = activations.get(mask_act)
# For softmax, feed the source dimension.
if has_arg(mask_nl_class, "dim"):
self.output_act = mask_nl_class(dim=1)
else:
self.output_act = mask_nl_class()
def forward(self, mixture_w):
"""
Args:
mixture_w (:class:`torch.Tensor`): Tensor of shape
[batch, n_filters, n_frames]
Returns:
:class:`torch.Tensor`
estimated mask of shape [batch, n_src, n_filters, n_frames]
"""
batch, n_filters, n_frames = mixture_w.size()
output = self.bottleneck(mixture_w) # [batch, bn_chan, n_frames]
output = unfold(
output.unsqueeze(-1),
kernel_size=(self.chunk_size, 1),
padding=(self.chunk_size, 0),
stride=(self.hop_size, 1),
)
n_chunks = output.size(-1)
output = output.reshape(batch, self.bn_chan, self.chunk_size, n_chunks)
# Apply stacked DPRNN Blocks sequentially
output = self.net(output)
# Map to sources with kind of 2D masks
output = self.first_out(output)
output = output.reshape(batch * self.n_src, self.bn_chan, self.chunk_size, n_chunks)
# Overlap and add:
# [batch, out_chan, chunk_size, n_chunks] -> [batch, out_chan, n_frames]
to_unfold = self.bn_chan * self.chunk_size
output = fold(
output.reshape(batch * self.n_src, to_unfold, n_chunks),
(n_frames, 1),
kernel_size=(self.chunk_size, 1),
padding=(self.chunk_size, 0),
stride=(self.hop_size, 1),
)
# Apply gating
output = output.reshape(batch * self.n_src, self.bn_chan, -1)
output = self.net_out(output) * self.net_gate(output)
# Compute mask
score = self.mask_net(output)
est_mask = self.output_act(score)
est_mask = est_mask.view(batch, self.n_src, self.out_chan, n_frames)
return est_mask
def get_config(self):
config = {
"in_chan": self.in_chan,
"out_chan": self.out_chan,
"bn_chan": self.bn_chan,
"hid_size": self.hid_size,
"chunk_size": self.chunk_size,
"hop_size": self.hop_size,
"n_repeats": self.n_repeats,
"n_src": self.n_src,
"norm_type": self.norm_type,
"mask_act": self.mask_act,
"bidirectional": self.bidirectional,
"rnn_type": self.rnn_type,
"num_layers": self.num_layers,
"dropout": self.dropout,
}
return config
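# Hedged usage sketch for DPRNN (values are assumptions):
#   masker = DPRNN(in_chan=64, n_src=2)
#   est_mask = masker(torch.randn(4, 64, 3000))   # -> [4, 2, 64, 3000], one mask per source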
class LSTMMasker(nn.Module):
"""LSTM mask network introduced in [1], without skip connections.
Args:
in_chan (int): Number of input filters.
n_src (int): Number of masks to estimate.
out_chan (int or None): Number of bins in the estimated masks.
Defaults to `in_chan`.
rnn_type (str, optional): Type of RNN used. Choose between ``'RNN'``,
``'LSTM'`` and ``'GRU'``.
n_layers (int, optional): Number of layers in each RNN.
hid_size (int): Number of neurons in the RNNs cell state.
mask_act (str, optional): Which non-linear function to generate mask.
bidirectional (bool, optional): Whether to use BiLSTM
dropout (float, optional): Dropout ratio, must be in [0,1].
References:
[1]: Yi Luo et al. "Real-time Single-channel Dereverberation and Separation
with Time-domain Audio Separation Network", Interspeech 2018
"""
def __init__(
self,
in_chan,
n_src,
out_chan=None,
rnn_type="lstm",
n_layers=4,
hid_size=512,
dropout=0.3,
mask_act="sigmoid",
bidirectional=True,
):
super().__init__()
self.in_chan = in_chan
self.n_src = n_src
out_chan = out_chan if out_chan is not None else in_chan
self.out_chan = out_chan
self.rnn_type = rnn_type
self.n_layers = n_layers
self.hid_size = hid_size
self.dropout = dropout
self.mask_act = mask_act
self.bidirectional = bidirectional
# Get activation function.
mask_nl_class = activations.get(mask_act)
# For softmax, feed the source dimension.
if has_arg(mask_nl_class, "dim"):
self.output_act = mask_nl_class(dim=1)
else:
self.output_act = mask_nl_class()
# Create TasNet masker
out_size = hid_size * (int(bidirectional) + 1)
if bidirectional:
self.bn_layer = GlobLN(in_chan)
else:
self.bn_layer = CumLN(in_chan)
self.masker = nn.Sequential(
SingleRNN(
"lstm",
in_chan,
hidden_size=hid_size,
n_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout,
),
nn.Linear(out_size, self.n_src * out_chan),
self.output_act,
)
def forward(self, x):
batch_size = x.shape[0]
to_sep = self.bn_layer(x)
est_masks = self.masker(to_sep.transpose(-1, -2)).transpose(-1, -2)
est_masks = est_masks.view(batch_size, self.n_src, self.out_chan, -1)
return est_masks
def get_config(self):
config = {
"in_chan": self.in_chan,
"n_src": self.n_src,
"out_chan": self.out_chan,
"rnn_type": self.rnn_type,
"n_layers": self.n_layers,
"hid_size": self.hid_size,
"dropout": self.dropout,
"mask_act": self.mask_act,
"bidirectional": self.bidirectional,
}
return config
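# Hedged usage sketch for LSTMMasker (values are assumptions):
#   masker = LSTMMasker(in_chan=512, n_src=2)
#   est_masks = masker(torch.randn(4, 512, 3000))  # -> [4, 2, 512, 3000]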
class DCCRMaskNetRNN(nn.Module):
"""RNN (LSTM) layer between encoders and decoders introduced in [1].
Args:
in_size (int): Number of inputs to the RNN. Must be the product of non-batch,
non-time dimensions of output shape of last encoder, i.e. if the last
encoder output shape is [batch, n_chans, n_freqs, time], `in_size` must be
`n_chans * n_freqs`.
hid_size (int, optional): Number of units in RNN.
rnn_type (str, optional): Type of RNN to use. See ``SingleRNN`` for valid values.
norm_type (Optional[str], optional): Norm to use after linear.
See ``asteroid.masknn.norms`` for valid values. (Not used in [1]).
References:
[1] : "DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement",
Yanxin Hu et al.
https://arxiv.org/abs/2008.00264
"""
def __init__(self, in_size, hid_size=128, rnn_type="LSTM", norm_type=None):
super().__init__()
self.rnn = complex_nn.ComplexMultiplicationWrapper(SingleRNN, rnn_type, in_size, hid_size)
self.linear = complex_nn.ComplexMultiplicationWrapper(nn.Linear, hid_size, in_size)
self.norm = norms.get_complex(norm_type)
def forward(self, x: complex_nn.ComplexTensor):
"""Input shape: [batch, ..., time]"""
# Remember x for skip connection
skip_conn = x
# Permute to [batch, time, ...]
x = x.permute(0, x.ndim - 1, *range(1, x.ndim - 1))
# RNN + Linear expect [batch, time, rest]
x = self.linear(self.rnn(x.reshape(*x.shape[:2], -1))).reshape(*x.shape)
# Permute back to [batch, ..., time]
x = x.permute(0, *range(2, x.ndim), 1)
if self.norm is not None:
x = self.norm(x)
return x + skip_conn
class DCCRMaskNet(BaseDCUMaskNet):
"""Masking part of DCCRNet, as proposed in [1].
Valid `architecture` values for the ``default_architecture`` classmethod are:
"DCCRN".
Args:
encoders (list of length `N` of tuples of (in_chan, out_chan, kernel_size, stride, padding)):
Arguments of encoders of the u-net
decoders (list of length `N` of tuples of (in_chan, out_chan, kernel_size, stride, padding))
Arguments of decoders of the u-net
        n_freqs (int): Number of frequencies (dim 1) of input to ``.forward()``.
`n_freqs - 1` must be divisible by `f_0 * f_1 * ... * f_N` where `f_k` are
the frequency strides of the encoders.
References:
[1] : "DCCRN: Deep Complex Convolution Recurrent Network for Phase-Aware Speech Enhancement",
Yanxin Hu et al.
https://arxiv.org/abs/2008.00264
"""
_architectures = DCCRN_ARCHITECTURES
def __init__(self, encoders, decoders, n_freqs, **kwargs):
encoders_stride_prod = np.prod([enc_stride for _, _, _, enc_stride, _ in encoders], axis=0)
freq_prod, _ = encoders_stride_prod
last_encoder_out_shape = (encoders[-1][1], int(np.ceil(n_freqs / freq_prod)))
super().__init__(
encoders,
decoders,
intermediate_layer=DCCRMaskNetRNN(np.prod(last_encoder_out_shape)),
**kwargs,
)
self.n_freqs = n_freqs
| 37.697023
| 101
| 0.598513
|
2e7482ec37ff880c51b4b5982f2b13eb9c7a3523
| 8,800
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20200501/hub_virtual_network_connection.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200501/hub_virtual_network_connection.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20200501/hub_virtual_network_connection.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['HubVirtualNetworkConnection']
class HubVirtualNetworkConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_hub_to_remote_vnet_transit: Optional[pulumi.Input[bool]] = None,
allow_remote_vnet_to_use_hub_vnet_gateways: Optional[pulumi.Input[bool]] = None,
connection_name: Optional[pulumi.Input[str]] = None,
enable_internet_security: Optional[pulumi.Input[bool]] = None,
id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
remote_virtual_network: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
routing_configuration: Optional[pulumi.Input[pulumi.InputType['RoutingConfigurationArgs']]] = None,
virtual_hub_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
HubVirtualNetworkConnection Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_hub_to_remote_vnet_transit: Deprecated: VirtualHub to RemoteVnet transit to enabled or not.
:param pulumi.Input[bool] allow_remote_vnet_to_use_hub_vnet_gateways: Deprecated: Allow RemoteVnet to use Virtual Hub's gateways.
:param pulumi.Input[str] connection_name: The name of the HubVirtualNetworkConnection.
:param pulumi.Input[bool] enable_internet_security: Enable internet security.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] remote_virtual_network: Reference to the remote virtual network.
:param pulumi.Input[str] resource_group_name: The resource group name of the HubVirtualNetworkConnection.
:param pulumi.Input[pulumi.InputType['RoutingConfigurationArgs']] routing_configuration: The Routing Configuration indicating the associated and propagated route tables on this connection.
:param pulumi.Input[str] virtual_hub_name: The name of the VirtualHub.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allow_hub_to_remote_vnet_transit'] = allow_hub_to_remote_vnet_transit
__props__['allow_remote_vnet_to_use_hub_vnet_gateways'] = allow_remote_vnet_to_use_hub_vnet_gateways
if connection_name is None:
raise TypeError("Missing required property 'connection_name'")
__props__['connection_name'] = connection_name
__props__['enable_internet_security'] = enable_internet_security
__props__['id'] = id
__props__['name'] = name
__props__['remote_virtual_network'] = remote_virtual_network
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['routing_configuration'] = routing_configuration
if virtual_hub_name is None:
raise TypeError("Missing required property 'virtual_hub_name'")
__props__['virtual_hub_name'] = virtual_hub_name
__props__['etag'] = None
__props__['provisioning_state'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:HubVirtualNetworkConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200601:HubVirtualNetworkConnection"), pulumi.Alias(type_="azure-nextgen:network/v20200701:HubVirtualNetworkConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HubVirtualNetworkConnection, __self__).__init__(
'azure-nextgen:network/v20200501:HubVirtualNetworkConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HubVirtualNetworkConnection':
"""
Get an existing HubVirtualNetworkConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return HubVirtualNetworkConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowHubToRemoteVnetTransit")
def allow_hub_to_remote_vnet_transit(self) -> pulumi.Output[Optional[bool]]:
"""
Deprecated: VirtualHub to RemoteVnet transit to enabled or not.
"""
return pulumi.get(self, "allow_hub_to_remote_vnet_transit")
@property
@pulumi.getter(name="allowRemoteVnetToUseHubVnetGateways")
def allow_remote_vnet_to_use_hub_vnet_gateways(self) -> pulumi.Output[Optional[bool]]:
"""
Deprecated: Allow RemoteVnet to use Virtual Hub's gateways.
"""
return pulumi.get(self, "allow_remote_vnet_to_use_hub_vnet_gateways")
@property
@pulumi.getter(name="enableInternetSecurity")
def enable_internet_security(self) -> pulumi.Output[Optional[bool]]:
"""
Enable internet security.
"""
return pulumi.get(self, "enable_internet_security")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the hub virtual network connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="remoteVirtualNetwork")
def remote_virtual_network(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference to the remote virtual network.
"""
return pulumi.get(self, "remote_virtual_network")
@property
@pulumi.getter(name="routingConfiguration")
def routing_configuration(self) -> pulumi.Output[Optional['outputs.RoutingConfigurationResponse']]:
"""
The Routing Configuration indicating the associated and propagated route tables on this connection.
"""
return pulumi.get(self, "routing_configuration")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
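# Hedged usage sketch (resource names, the resource group, the hub name and the remote VNet ID are
# placeholders, not values from the original project; it also assumes SubResourceArgs is exported
# by the generated ._inputs module, as the constructor's type hints suggest):
#   conn = HubVirtualNetworkConnection(
#       "exampleConnection",
#       connection_name="exampleConnection",
#       resource_group_name="example-rg",
#       virtual_hub_name="example-hub",
#       remote_virtual_network=SubResourceArgs(id="<remote-vnet-resource-id>"),
#   )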
| 48.888889
| 299
| 0.684091
|
77b67570992a708dc6e6376bc552b253f0ad87c4
| 11,914
|
py
|
Python
|
chb/mips/MIPSFunction.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/mips/MIPSFunction.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/mips/MIPSFunction.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020-2021 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
import hashlib
import xml.etree.ElementTree as ET
from typing import Callable, cast, Dict, List, Mapping, Optional, Sequence, Tuple
from chb.api.InterfaceDictionary import InterfaceDictionary
from chb.app.BasicBlock import BasicBlock
from chb.app.BDictionary import BDictionary
from chb.app.Cfg import Cfg
from chb.app.Function import Function
from chb.app.FunctionInfo import FunctionInfo
from chb.app.FunctionDictionary import FunctionDictionary
from chb.app.StringXRefs import StringsXRefs
from chb.invariants.FnVarDictionary import FnVarDictionary
from chb.invariants.FnInvDictionary import FnInvDictionary
from chb.invariants.FnInvariants import FnInvariants
from chb.invariants.XVariable import XVariable
from chb.invariants.XXpr import XXpr
from chb.mips.MIPSDictionary import MIPSDictionary
from chb.mips.MIPSBlock import MIPSBlock
from chb.mips.MIPSCfg import MIPSCfg
from chb.mips.MIPSInstruction import MIPSInstruction
from chb.models.ModelsAccess import ModelsAccess
import chb.simulation.SimUtil as SU
import chb.util.fileutil as UF
class MIPSFunction(Function):
def __init__(self,
path: str,
filename: str,
bd: BDictionary,
ixd: InterfaceDictionary,
finfo: FunctionInfo,
mipsd: MIPSDictionary,
stringsxrefs: StringsXRefs,
names: Sequence[str],
models: ModelsAccess,
xnode: ET.Element) -> None:
Function.__init__(
self, path, filename, bd, ixd, finfo, stringsxrefs, names, xnode)
self._mipsd = mipsd
self._models = models
self._blocks: Dict[str, MIPSBlock] = {}
self._cfg: Optional[MIPSCfg] = None
self._mipsfnd: Optional[FunctionDictionary] = None
self._addressreference: Dict[str, str] = {}
@property
def models(self) -> ModelsAccess:
return self._models
@property
def dictionary(self) -> MIPSDictionary:
return self._mipsd
@property
def functiondictionary(self) -> FunctionDictionary:
if self._mipsfnd is None:
xfnd = self.xnode.find("instr-dictionary")
if xfnd is None:
raise UF.CHBError("Element instr-dictionary missing from xml")
self._mipsfnd = FunctionDictionary(self, xfnd)
return self._mipsfnd
@property
def blocks(self) -> Dict[str, MIPSBlock]:
if len(self._blocks) == 0:
xinstrs = self.xnode.find("instructions")
if xinstrs is None:
raise UF.CHBError(
"Xml element instructions missing from function xml")
for b in xinstrs.findall("bl"):
baddr = b.get("ba")
if baddr is None:
raise UF.CHBError("Block address is missing from xml")
self._blocks[baddr] = MIPSBlock(self, b)
return self._blocks
@property
def instructions(self) -> Mapping[str, MIPSInstruction]:
result: Dict[str, MIPSInstruction] = {}
def f(baddr: str, block: MIPSBlock) -> None:
result.update(block.instructions)
self.iter_blocks(f)
return result
def iter_blocks(self, f: Callable[[str, MIPSBlock], None]) -> None:
for (ba, block) in self.blocks.items():
mipsblock = cast(MIPSBlock, block)
f(ba, mipsblock)
def iter_instructions(self, f: Callable[[str, MIPSInstruction], None]) -> None:
for (ia, instr) in self.instructions.items():
mipsinstr = cast(MIPSInstruction, instr)
f(ia, mipsinstr)
@property
def branchconditions(self) -> Mapping[str, MIPSInstruction]:
result: Dict[str, MIPSInstruction] = {}
for b in self.blocks.values():
lastinstr = b.last_instruction
if lastinstr.is_branch_instruction:
ftconditions = lastinstr.ft_conditions
if len(ftconditions) > 0:
result[b.baddr] = cast(MIPSInstruction, lastinstr)
return result
def set_fnvar_dictionary(self, xnode: ET.Element) -> FnVarDictionary:
return FnVarDictionary(self, xnode)
@property
def cfg(self) -> MIPSCfg:
if self._cfg is None:
xcfg = self.xnode.find("cfg")
if xcfg is None:
raise UF.CHBError("Element cfg missing from function xml")
self._cfg = MIPSCfg(self, xcfg)
return self._cfg
@property
def address_reference(self) -> Mapping[str, str]:
"""Return map of addr -> block addr."""
if len(self._addressreference) == 0:
result: Dict[str, str] = {}
def add(baddr: str, block: MIPSBlock) -> None:
for a in block.instructions:
result[a] = baddr
self.iter_blocks(add)
self._addressreference = result
return self._addressreference
def byte_string(self, chunksize: Optional[int] = None) -> str:
s: List[str] = []
def f(ia: str, i: MIPSInstruction) -> None:
s.extend(i.bytestring)
self.iter_instructions(f)
if chunksize is None:
return ''.join(s)
else:
sresult = ''.join(s)
size = len(sresult)
chunks = [sresult[i: i + chunksize] for i in range(0, size, chunksize)]
return '\n'.join(chunks)
def calls_to_app_function(self, tgtaddr: str) -> List[MIPSInstruction]:
result: List[MIPSInstruction] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
if instr.is_call_to_app_function(tgtaddr):
result.append(instr)
self.iter_instructions(f)
return result
def load_word_instructions(self) -> List[MIPSInstruction]:
result: List[MIPSInstruction] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
if instr.is_load_word_instruction:
result.append(instr)
self.iter_instructions(f)
return result
def store_word_instructions(self) -> List[MIPSInstruction]:
result: List[MIPSInstruction] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
if instr.is_store_word_instruction:
result.append(instr)
self.iter_instructions(f)
return result
def restore_register_instructions(self) -> List[MIPSInstruction]:
result: List[MIPSInstruction] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
if instr.is_restore_register_instruction:
result.append(instr)
self.iter_instructions(f)
return result
def call_instructions_to_target(self, tgt: str) -> List[MIPSInstruction]:
"""Returns a list of instructions that are calls to the given function."""
result: List[MIPSInstruction] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
if instr.is_call_instruction:
if str(instr.call_target) == tgt:
result.append(instr)
self.iter_instructions(f)
return result
def global_refs(self) -> Tuple[List[XVariable], List[XXpr]]:
lhsresult: List[XVariable] = []
rhsresult: List[XXpr] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
(lhs, rhs) = instr.global_refs()
lhsresult.extend(lhs)
rhsresult.extend(rhs)
self.iter_instructions(f)
return (lhsresult, rhsresult)
def strings_referenced(self) -> List[str]:
result: List[str] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
result.extend(instr.strings_referenced)
self.iter_instructions(f)
return result
# returns a dictionary of gvar -> count
def global_variables(self) -> Dict[str, int]:
result: Dict[str, int] = {}
def f(iaddr: str, instr: MIPSInstruction) -> None:
iresult = instr.global_variables()
for gv in iresult:
result.setdefault(gv, 0)
result[gv] += iresult[gv]
self.iter_instructions(f)
return result
# returns a dictionary of registers used in the function (name -> variable)
def registers(self) -> Dict[str, str]:
result: Dict[str, str] = {}
def f(iaddr: str, instr: MIPSInstruction) -> None:
iresult = instr.registers()
for r in iresult:
result.setdefault(r, iresult[r])
self.iter_instructions(f)
return result
def return_instructions(self) -> List[MIPSInstruction]:
result: List[MIPSInstruction] = []
def f(iaddr: str, instr: MIPSInstruction) -> None:
if instr.is_return_instruction:
result.append(instr)
self.iter_instructions(f)
return result
def jump_conditions(self) -> Dict[str, Dict[str, str]]:
return self.cfg.conditions()
def to_sliced_string(self, registers: List[str]) -> str:
lines: List[str] = []
for b in sorted(self.blocks):
looplevels = self.cfg.loop_levels(self.blocks[b].baddr)
blocklines = self.blocks[b].to_sliced_string(registers, len(looplevels))
if len(blocklines) > 0:
lines.append(blocklines)
else:
lines.append(
str(self.blocks[b].baddr).rjust(10)
+ ' '
+ ('L' * len(looplevels)))
lines.append('-' * 80)
return '\n'.join(lines)
def to_string(
self,
bytes: bool = False,
bytestring: bool = False,
hash: bool = False,
opcodetxt: bool = True,
opcodewidth: int = 25,
sp: bool = True) -> str:
lines: List[str] = []
for b in sorted(self.blocks):
lines.append(
self.blocks[b].to_string(
bytes=bytes,
opcodetxt=opcodetxt,
opcodewidth=opcodewidth,
sp=sp))
lines.append('-' * 80)
if bytestring:
lines.append(self.byte_string(chunksize=80))
if hash:
lines.append('hash: ' + self.md5)
return '\n'.join(lines)
def __str__(self) -> str:
return self.to_string()
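
# A hedged usage sketch (illustrative only): constructing a MIPSFunction
# requires the surrounding CodeHawk analysis artifacts, so assume `fn` is an
# already-loaded instance. The callback-based iterators defined above can then
# be used, for example, to collect all call instructions grouped by target:
#
#     calls: Dict[str, List[MIPSInstruction]] = {}
#
#     def collect(iaddr: str, instr: MIPSInstruction) -> None:
#         if instr.is_call_instruction:
#             calls.setdefault(str(instr.call_target), []).append(instr)
#
#     fn.iter_instructions(collect)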
| 35.041176
| 84
| 0.601309
|
5263e3b833251d9fee3dcce81470734ad051223a
| 5,829
|
py
|
Python
|
tests/test_easel/test_msa.py
|
RuneBlaze/pyhmmer
|
9d013d23b3e276c0b63ed32f1e646c6a26c2c578
|
[
"MIT"
] | null | null | null |
tests/test_easel/test_msa.py
|
RuneBlaze/pyhmmer
|
9d013d23b3e276c0b63ed32f1e646c6a26c2c578
|
[
"MIT"
] | null | null | null |
tests/test_easel/test_msa.py
|
RuneBlaze/pyhmmer
|
9d013d23b3e276c0b63ed32f1e646c6a26c2c578
|
[
"MIT"
] | null | null | null |
import copy
import functools
import gc
import io
import os
import unittest
import tempfile
from pyhmmer import easel
class TestMSA(object):
@classmethod
def setUpClass(cls):
cls.formats_folder = os.path.realpath(
os.path.join(
__file__, os.pardir, os.pardir, os.pardir, "vendor", "easel", "formats"
)
)
def test_write_roundtrip_stockholm(self):
sto = os.path.join(self.formats_folder, "stockholm.1")
msa = self.read_msa(sto)
with io.BytesIO() as buffer:
msa.write(buffer, "stockholm")
actual = buffer.getvalue().decode()
with open(sto) as f:
expected = f.read()
self.assertMultiLineEqual(actual, expected)
def test_write_invalid_format(self):
msa = easel.TextMSA()
self.assertRaises(ValueError, msa.write, io.BytesIO(), "invalidformat")
def test_sequences(self):
sto = os.path.join(self.formats_folder, "stockholm.1")
msa = self.read_msa(sto)
self.assertEqual(len(msa.sequences), 2)
self.assertEqual(msa.sequences[0].name, b"seq1")
self.assertEqual(msa.sequences[1].name, b"seq2")
def test_init_empty(self):
msa = self.MSA()
self.assertEqual(len(msa), 0)
self.assertEqual(len(msa.sequences), 0)
self.assertFalse(msa)
def test_init_name(self):
msa = self.MSA(name=b"ali")
self.assertEqual(msa.name, b"ali")
def test_init_description(self):
d = b"an alignment made from Python"
msa = self.MSA(description=d)
self.assertEqual(msa.description, d)
def test_init_author(self):
author = b"Martin Larralde"
msa = self.MSA(author=author)
self.assertEqual(msa.author, author)
def test_init_accession(self):
acc = b"TST001"
msa = self.MSA(accession=acc)
self.assertEqual(msa.accession, acc)
class TestTextMSA(TestMSA, unittest.TestCase):
MSA = easel.TextMSA
@staticmethod
def read_msa(sto):
with easel.MSAFile(sto, "stockholm") as msa_file:
return msa_file.read()
def test_eq(self):
s1 = easel.TextSequence(name=b"seq1", sequence="ATGC")
s2 = easel.TextSequence(name=b"seq2", sequence="ATGG")
msa = easel.TextMSA(sequences=[s1, s2])
msa2 = easel.TextMSA(sequences=[s1, s2])
self.assertEqual(msa, msa2)
msa2.name = b"other"
self.assertNotEqual(msa, msa2)
self.assertNotEqual(msa, 1)
self.assertNotEqual(msa, [s1, s2])
def test_eq_copy(self):
s1 = easel.TextSequence(name=b"seq1", sequence="ATGC")
s2 = easel.TextSequence(name=b"seq2", sequence="ATGG")
msa = easel.TextMSA(sequences=[s1, s2])
self.assertEqual(msa, msa.copy())
self.assertEqual(msa, copy.copy(msa))
def test_init_sequences(self):
s1 = easel.TextSequence(name=b"seq1", sequence="ATGC")
msa = easel.TextMSA(sequences=[s1])
self.assertEqual(len(msa), 4)
self.assertEqual(len(msa.sequences), 1)
self.assertTrue(msa)
def test_init_length_mismatch(self):
s1 = easel.TextSequence(name=b"seq1", sequence="ATGC")
s2 = easel.TextSequence(name=b"seq2", sequence="AT")
self.assertRaises(ValueError, easel.TextMSA, sequences=[s1, s2])
def test_init_duplicate_names(self):
s1 = easel.TextSequence(name=b"seq1", sequence="ATGC")
s2 = easel.TextSequence(name=b"seq1", sequence="ATTC")
self.assertRaises(ValueError, easel.TextMSA, sequences=[s1, s2])
class TestDigitalMSA(TestMSA, unittest.TestCase):
alphabet = easel.Alphabet.dna()
MSA = functools.partial(easel.DigitalMSA, alphabet)
@staticmethod
def read_msa(sto):
with easel.MSAFile(sto, "stockholm") as msa_file:
msa_file.set_digital(msa_file.guess_alphabet())
return msa_file.read()
def test_eq(self):
s1 = easel.DigitalSequence(self.alphabet, name=b"seq1", sequence=bytearray([1, 2, 3, 4]))
s2 = easel.DigitalSequence(self.alphabet, name=b"seq2", sequence=bytearray([1, 2, 3, 3]))
msa = easel.DigitalMSA(self.alphabet, sequences=[s1, s2])
msa2 = easel.DigitalMSA(self.alphabet, sequences=[s1, s2])
self.assertEqual(msa, msa2)
msa2.name = b"other"
self.assertNotEqual(msa, msa2)
self.assertNotEqual(msa, 1)
self.assertNotEqual(msa, [s1, s2])
def test_eq_copy(self):
s1 = easel.DigitalSequence(self.alphabet, name=b"seq1", sequence=bytearray([1, 2, 3, 4]))
s2 = easel.DigitalSequence(self.alphabet, name=b"seq2", sequence=bytearray([1, 2, 3, 3]))
msa = easel.DigitalMSA(self.alphabet, sequences=[s1, s2])
self.assertEqual(msa, msa.copy())
self.assertEqual(msa, copy.copy(msa))
def test_init_sequences(self):
s1 = easel.DigitalSequence(self.alphabet, name=b"seq1", sequence=bytearray([1, 2, 3, 4]))
msa = easel.DigitalMSA(self.alphabet, sequences=[s1])
self.assertEqual(len(msa), 4)
self.assertEqual(len(msa.sequences), 1)
self.assertTrue(msa)
def test_init_length_mismatch(self):
s1 = easel.DigitalSequence(self.alphabet, name=b"seq1", sequence=bytearray([1, 2, 3, 4]))
s2 = easel.DigitalSequence(self.alphabet, name=b"seq2", sequence=bytearray([1, 2]))
self.assertRaises(ValueError, easel.DigitalMSA, self.alphabet, sequences=[s1, s2])
def test_init_duplicate_names(self):
s1 = easel.DigitalSequence(self.alphabet, name=b"seq1", sequence=bytearray([1, 2, 3, 4]))
s2 = easel.DigitalSequence(self.alphabet, name=b"seq1", sequence=bytearray([1, 2, 3, 3]))
self.assertRaises(ValueError, easel.DigitalMSA, self.alphabet, sequences=[s1, s2])
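
# A minimal, hedged sketch (not an actual test case) using the same
# pyhmmer.easel calls exercised by the tests above: build a small text-mode
# alignment and serialize it to Stockholm format. The sequence names and
# residues are made up for illustration.
def _example_build_and_write_msa():
    seqs = [
        easel.TextSequence(name=b"seqA", sequence="ATGC"),
        easel.TextSequence(name=b"seqB", sequence="ATGG"),
    ]
    msa = easel.TextMSA(name=b"example", sequences=seqs)
    with io.BytesIO() as buffer:
        msa.write(buffer, "stockholm")
        return buffer.getvalue().decode()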
| 35.981481
| 97
| 0.641276
|
ded6795e7aa7eb09183f76ceeed6b7f3f3f4c2d3
| 9,895
|
py
|
Python
|
tests/test_module_calls.py
|
kalaspuff/utcnow
|
a2b0341f4b8a97b0dbd92b83ebac60be83aff0c3
|
[
"MIT"
] | null | null | null |
tests/test_module_calls.py
|
kalaspuff/utcnow
|
a2b0341f4b8a97b0dbd92b83ebac60be83aff0c3
|
[
"MIT"
] | 34
|
2021-02-18T09:36:19.000Z
|
2022-03-28T21:10:25.000Z
|
tests/test_module_calls.py
|
kalaspuff/utcnow
|
a2b0341f4b8a97b0dbd92b83ebac60be83aff0c3
|
[
"MIT"
] | null | null | null |
import datetime
def test_module() -> None:
import utcnow
# Test types
assert type(utcnow) is utcnow._module
assert len(str(utcnow)) == 27
assert isinstance(repr(utcnow), str)
assert len(repr(utcnow)) == 27
# Modules aren't callable, but this one is – it's frowned upon and bad practice.
assert utcnow("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow("1984-08-01 00:00:00") == "1984-08-01T00:00:00.000000Z"
assert utcnow("1984-08-01 12:00:00") != "1984-08-01T00:00:00.000000Z"
assert datetime.datetime.strptime(utcnow(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(str(utcnow), "%Y-%m-%dT%H:%M:%S.%f%z")
assert utcnow(datetime.datetime(2021, 4, 30, 8, 0)) == "2021-04-30T08:00:00.000000Z"
# Testing module functions
assert utcnow.utcnow("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow("1984-08-01 00:00:00") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow("1984-08-01 12:00:00") != "1984-08-01T00:00:00.000000Z"
assert utcnow.as_string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.as_str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.as_rfc3339("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.to_string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.to_str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.to_rfc3339("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.get_string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.get_str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.get_rfc3339("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.get("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.rfc3339("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow.as_string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow.as_str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow.string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow.utcnow.str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert datetime.datetime.strptime(utcnow.utcnow(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.as_string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.as_str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.utcnow.as_string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.utcnow.as_str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.utcnow.string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow.utcnow.str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(str(utcnow.utcnow), "%Y-%m-%dT%H:%M:%S.%f%z")
assert utcnow.utcnow(datetime.datetime(2021, 4, 30, 8, 0)) == "2021-04-30T08:00:00.000000Z"
assert utcnow.as_datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.as_date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.to_datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.to_date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.get_datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.get_date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.utcnow.as_datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.utcnow.as_date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.utcnow.datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow.utcnow.date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
# Unixtime
assert isinstance(utcnow.as_string(), str)
assert isinstance(utcnow.as_datetime(), datetime.datetime)
assert isinstance(utcnow.as_unixtime(), (int, float))
assert isinstance(utcnow.as_unixtime(0), (int, float))
assert utcnow.as_unixtime(0) == 0
assert utcnow.as_unixtime(1) == 1
assert utcnow.as_unixtime(-1) == -1
for i in range(-1000, 1000):
assert utcnow.as_unixtime(i * 4711) == i * 4711
assert utcnow.as_unixtime(i * 4711) == i * 4711
assert utcnow.as_unixtime(i * 4711 + 0.1) == i * 4711 + 0.1
assert utcnow.as_unixtime("2021-02-27T06:22:00") == 1614406920.0
assert utcnow.as_unixtime("2021-02-27T06:22:00.1") == 1614406920.1
assert utcnow.as_unixtime("2021-02-27T06:22:00.001000") == 1614406920.001
assert utcnow.as_unixtime("2021-02-27T06:22:00.000000") == 1614406920.0
assert utcnow.as_unixtime("2021-02-27T06:22:00.000000") == 1614406920.0
assert utcnow.as_unixtime("2021-02-27T06:22:00.000009") == 1614406920.000009
# Timezone test
assert utcnow.as_datetime("2021-04-30T09:00:00.000000+01:00") == datetime.datetime(
2021, 4, 30, 8, 0, tzinfo=datetime.timezone.utc
)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000+00:00") == datetime.datetime(
2021, 4, 30, 8, 0, tzinfo=datetime.timezone.utc
)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000-00:00") == datetime.datetime(
2021, 4, 30, 8, 0, tzinfo=datetime.timezone.utc
)
assert utcnow.as_datetime("2021-04-30T07:00:00.000000-01:00") == datetime.datetime(
2021, 4, 30, 8, 0, tzinfo=datetime.timezone.utc
)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000 UTC") == datetime.datetime(
2021, 4, 30, 8, 0, tzinfo=datetime.timezone.utc
)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000") == datetime.datetime(
2021, 4, 30, 8, 0, tzinfo=datetime.timezone.utc
)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000 UTC") != datetime.datetime(2021, 4, 30, 8, 0)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000 UTC") == utcnow.as_datetime(
datetime.datetime(2021, 4, 30, 8, 0)
)
assert utcnow.as_datetime("2021-04-30T08:00:00.000000 UTC") != utcnow.as_datetime(
datetime.datetime(2021, 4, 30, 8, 1)
)
assert utcnow.as_string(utcnow.as_datetime("2021-04-30T08:00:00.000000 UTC")) == utcnow.utcnow(
utcnow.as_datetime(datetime.datetime(2021, 4, 30, 8, 0))
)
# Testing function imports
from utcnow import as_str, as_string
from utcnow import str as str_
from utcnow import string
assert as_string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert as_str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert str_("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert datetime.datetime.strptime(as_string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(as_str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(str_(), "%Y-%m-%dT%H:%M:%S.%f%z")
# Testing submodule import with function calls
from utcnow import utcnow as utcnow_
assert utcnow_("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow_.as_string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow_.as_str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow_.string("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert utcnow_.str("1984-08-01") == "1984-08-01T00:00:00.000000Z"
assert datetime.datetime.strptime(utcnow_(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow_.as_string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow_.as_str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow_.string(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(utcnow_.str(), "%Y-%m-%dT%H:%M:%S.%f%z")
assert datetime.datetime.strptime(str(utcnow_), "%Y-%m-%dT%H:%M:%S.%f%z")
assert utcnow_(datetime.datetime(2021, 4, 30, 8, 0)) == "2021-04-30T08:00:00.000000Z"
assert utcnow_.as_datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow_.as_date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow_.datetime("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
assert utcnow_.date("2021-04-30T08:00:10.000000Z") == datetime.datetime(
2021, 4, 30, 8, 0, 10, tzinfo=datetime.timezone.utc
)
| 54.071038
| 103
| 0.657201
|
6b0a6d900c93b192795d7055eeaaa4a65e72dfcd
| 2,242
|
py
|
Python
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/utils/registry.py
|
hito0512/Vitis-AI
|
996459fb96cb077ed2f7e789d515893b1cccbc95
|
[
"Apache-2.0"
] | 848
|
2019-12-03T00:16:17.000Z
|
2022-03-31T22:53:17.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/utils/registry.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 656
|
2019-12-03T00:48:46.000Z
|
2022-03-31T18:41:54.000Z
|
tools/Vitis-AI-Quantizer/vai_q_pytorch/nndct_shared/utils/registry.py
|
wangyifan778/Vitis-AI
|
f61061eef7550d98bf02a171604c9a9f283a7c47
|
[
"Apache-2.0"
] | 506
|
2019-12-03T00:46:26.000Z
|
2022-03-30T10:34:56.000Z
|
#
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Registry mechanism for "registering" classes/functions for general use.
This is typically used with a decorator that calls Register for adding
a class or function to a registry.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Registry(object):
"""Provides a registry for saving objects."""
def __init__(self, name):
"""Creates a new registry."""
self._name = name
self._registry = {}
def register(self, obj, name=None):
"""Registers a Python object "obj" for the given "name".
Args:
obj: The object to add to the registry.
name: An optional string specifying the registry key for the obj.
If None, obj.__name__ will be used.
Raises:
KeyError: If same name is registered twice.
"""
if not name:
name = obj.__name__
if name in self._registry:
raise KeyError("Name '%s' has been registered in '%s'!" %
(name, self._name))
# logging.vlog(1, "Registering %s (%s) in %s.", name, obj, self._name)
self._registry[name] = obj
def list(self):
"""Lists registered items.
Returns:
A list of names of registered objects.
"""
return self._registry.keys()
def lookup(self, name):
"""Looks up "name".
Args:
name: a string specifying the registry key for the obj.
Returns:
Registered object if found
Raises:
LookupError: if "name" has not been registered.
"""
if name in self._registry:
return self._registry[name]
else:
raise LookupError("%s registry has no entry for: %s" % (self._name, name))
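

# A minimal usage sketch of the decorator pattern mentioned in the module
# docstring above. The names here (_demo_registry, register_demo, square) are
# illustrative assumptions, not part of the original code base.
_demo_registry = Registry("demo")


def register_demo(name=None):
  """Registers the decorated object in _demo_registry under `name`."""
  def decorator(obj):
    _demo_registry.register(obj, name)
    return obj
  return decorator


@register_demo("square")
def square(x):
  return x * x


if __name__ == "__main__":
  print(_demo_registry.list())
  print(_demo_registry.lookup("square")(3))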
| 29.116883
| 80
| 0.67752
|
c1cf126a34cbab1157f4cab2fa22fa69dd133583
| 4,515
|
py
|
Python
|
src/main/python/bayou/server/search_server.py
|
rohan2606/bayou
|
eca7a6628aa5942aeb4b57f684013c7e15ccb171
|
[
"Apache-2.0"
] | 1
|
2018-02-19T21:48:06.000Z
|
2018-02-19T21:48:06.000Z
|
src/main/python/bayou/server/search_server.py
|
rohan2606/bayou
|
eca7a6628aa5942aeb4b57f684013c7e15ccb171
|
[
"Apache-2.0"
] | null | null | null |
src/main/python/bayou/server/search_server.py
|
rohan2606/bayou
|
eca7a6628aa5942aeb4b57f684013c7e15ccb171
|
[
"Apache-2.0"
] | 1
|
2019-02-07T20:26:36.000Z
|
2019-02-07T20:26:36.000Z
|
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import numpy as np
import tensorflow as tf
import argparse
import os
import sys
import json
import textwrap
import socket
import time
import bayou.models.low_level_evidences.predict
File_Name = 'Search_Data_Basic'
HELP = """ Help me! :( """
#%%
TCP_IP = '127.0.0.1'
TCP_PORT = 5005
BUFFER_SIZE = 1024 #Normally 1024, but we want fast response
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
def search_server(clargs):
#set clargs.continue_from = True while testing, it continues from old saved config
clargs.continue_from = True
print('Loading Model, please wait _/\_ ...')
model = bayou.models.low_level_evidences.predict.BayesianPredictor
with tf.Session() as sess:
predictor = model(clargs.save, sess) # goes to predict.BayesianPredictor
print ('Model Loaded, All Ready to Predict Evidences!!')
while True:
print("\n\n Waiting for a new connection!")
s.listen(1)
conn, addr = s.accept()
print ('Connection address:', addr)
while True:
data = conn.recv(BUFFER_SIZE)
if not data: break
with open('/home/ubuntu/QueryProg.json', 'r') as f:
js = json.load(f)
a1, b1 = predictor.get_a1b1(js['programs'][0])
# evSigmas = predictor.get_ev_sigma(js['programs'][0])
# print(evSigmas)
# program = jsp[0]
# We do not need other paths in the program as all the evidences are the same for all the paths
# and for new test code we are only interested in the evidence encodings
# a1, a2 and ProbY are all scalars, b1 and b2 are vectors
programs = []
program = {}
program['a1'] = a1[0].item() # .item() converts a numpy element to a python element, one that is JSON serializable
program['b1'] = [val.item() for val in b1[0]]
programs.append(program)
print('\nWriting to {}...'.format('/home/ubuntu/QueryProgWEncoding.json'), end='\n')
with open('/home/ubuntu/QueryProgWEncoding.json', 'w') as f:
json.dump({'programs': programs}, fp=f, indent=2)
print ("Received data from client:", data)
conn.send(data) # echo
return
#%%
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(HELP))
# parser.add_argument('input_file', type=str, nargs=1,
# help='input data file')
parser.add_argument('--python_recursion_limit', type=int, default=10000,
help='set recursion limit for the Python interpreter')
parser.add_argument('--save', type=str, default='savedSearchModel',
help='checkpoint model during training here')
parser.add_argument('--evidence', type=str, default='all',
choices=['apicalls', 'types', 'keywords', 'all'],
help='use only this evidence for inference queries')
parser.add_argument('--output_file', type=str, default=None,
help='output file to print probabilities')
#clargs = parser.parse_args()
clargs = parser.parse_args()
# [
# # '..\..\..\..\..\..\data\DATA-training-top.json'])
# #'/home/rm38/Research/Bayou_Code_Search/Corpus/DATA-training-expanded-biased-TOP.json'])
# # '/home/ubuntu/Corpus/DATA-training-expanded-biased.json'])
# '/home/ubuntu/QueryProg.json'])
sys.setrecursionlimit(clargs.python_recursion_limit)
search_server(clargs)
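
# A hedged client-side sketch for the echo protocol served by search_server()
# above (illustrative only; it assumes the server is already running and that
# /home/ubuntu/QueryProg.json has been written beforehand, since the server
# re-reads that file for every message it receives):
#
#     client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     client.connect((TCP_IP, TCP_PORT))
#     client.send(b'run-query')            # any payload triggers one pass
#     print(client.recv(BUFFER_SIZE))      # server echoes the payload back
#     client.close()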
| 38.262712
| 131
| 0.609524
|
7d13ec589414d2b765fd3193027ddf4dd1eb2cc3
| 3,168
|
py
|
Python
|
Hanicam/FACE_KNOWN/faces/recognizer.py
|
ArianeFire/HaniCam
|
8a940486a613d680a0b556209a596cdf3eb71f53
|
[
"MIT"
] | null | null | null |
Hanicam/FACE_KNOWN/faces/recognizer.py
|
ArianeFire/HaniCam
|
8a940486a613d680a0b556209a596cdf3eb71f53
|
[
"MIT"
] | null | null | null |
Hanicam/FACE_KNOWN/faces/recognizer.py
|
ArianeFire/HaniCam
|
8a940486a613d680a0b556209a596cdf3eb71f53
|
[
"MIT"
] | null | null | null |
import cv2
import cv
import face
#Function deleting the path link
def deleteLink(link):
lien = str(link)
lien = lien.replace('/home/seydou/Hanicam/FACE_KNOWN/faces/','')
lien = lien.replace('.bmp','')
    # Strip the numeric indices appended to the names
for i in range(0,11) :
lien = lien.replace(str(i), "")
return lien
#Function writing the lists of students in txt file
def writeInFile(listes):
fichier = open('/opt/lampp/htdocs/www/Hancam/ressources/liste.txt', 'w')
#for line in listes :
fichier.write(listes)
fichier.close();
#Function Allowing Script to start
def allowScript() :
fichier1 = open("script/info.txt", "w");
fichier1.write("oui"+"\n");
fichier1.write("oui");
fichier1.close();
# STARTING RECOGNITION
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
listes = []
listes_set = set(listes)
nb_p = 2
path = "/home/seydou/Hanicam/FACE_KNOWN/s"
distances = []
results = []
while True:
# Capture frame-by-frame
ret, frame = video_capture.read()
#Save Frame
cv2.imwrite('detect/visage1.bmp', frame)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(80, 100),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
#for (x, y, w, h) in faces:
# cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Draw a rectangle around the faces
if len(faces) > 0 :
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
img = cv.LoadImage('detect/visage1.bmp')
image = cv.GetSubRect(img, (x + 20, y + 20, w - 30, h-30))
cv.SaveImage("detect/visage.bmp", image)
#Resize image
org_img = cv2.imread('detect/visage.bmp')
res_img = cv2.resize(org_img, (80, 100), interpolation = cv2.INTER_CUBIC)
cv2.imwrite('detect/visage_res.bmp', res_img) #cv2.cvtColor(res_img, cv2.COLOR_BGR2GRAY)
            # Perform recognition against each known face collection
            collections = []
            distances = []
            results = []
            for i in range(0, 2):
                collections.append(face.Collection(path + str(i)))
            visage = face.Tableau('detect/visage_res.bmp')
            for collection in collections:
                rets = collection.reconnaitre(visage)
                results.append(rets)
                distances.append(rets[0])
            # Keep the candidate with the smallest distance
            indice = distances.index(min(distances))
            rets = results[indice]
            print "Distance : ", rets[0]
if rets[1] != None :
print "Lien : ", rets[1].getLien()
nom = deleteLink(rets[1].getLien())
if nom not in listes_set :
writeInFile(nom)
listes_set.add(nom)
# Display the resulting frame
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
#allowScript()
break
# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
| 29.333333
| 101
| 0.580808
|
27b4a2b8f8e0bb2a910a3236527b3c34a591cfd9
| 100
|
py
|
Python
|
oss_server/web_interface/apps.py
|
PssbleTrngle/oss-server
|
e1b426e8c6411cbaa61af9a6d0ad0d892d6922d9
|
[
"MIT"
] | 36
|
2021-01-18T11:58:17.000Z
|
2021-12-09T21:40:29.000Z
|
oss_server/web_interface/apps.py
|
PssbleTrngle/oss-server
|
e1b426e8c6411cbaa61af9a6d0ad0d892d6922d9
|
[
"MIT"
] | 35
|
2020-03-25T11:52:13.000Z
|
2022-03-12T00:28:46.000Z
|
oss_server/web_interface/apps.py
|
PssbleTrngle/oss-server
|
e1b426e8c6411cbaa61af9a6d0ad0d892d6922d9
|
[
"MIT"
] | 7
|
2021-08-09T02:21:49.000Z
|
2022-02-06T11:04:04.000Z
|
from django.apps import AppConfig
class WebInterfaceConfig(AppConfig):
name = 'web_interface'
| 16.666667
| 36
| 0.78
|
45971d26125097e52d05efcec5ed6a262c927f9d
| 3,227
|
py
|
Python
|
lab-day/LabDayBackend/labday_api/views.py
|
JanStoltman/LabDayBackend
|
6c960385b18dad86424e8e619c19bdb6c4d686ea
|
[
"MIT"
] | 1
|
2018-02-09T10:49:17.000Z
|
2018-02-09T10:49:17.000Z
|
lab-day/LabDayBackend/labday_api/views.py
|
JanStoltman/LabDayBackend
|
6c960385b18dad86424e8e619c19bdb6c4d686ea
|
[
"MIT"
] | null | null | null |
lab-day/LabDayBackend/labday_api/views.py
|
JanStoltman/LabDayBackend
|
6c960385b18dad86424e8e619c19bdb6c4d686ea
|
[
"MIT"
] | null | null | null |
from itertools import chain
from operator import attrgetter
from django.http import HttpResponse
import json
from django.utils.crypto import get_random_string
from drf_multiple_model.views import ObjectMultipleModelAPIView
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from django.http import HttpResponseForbidden
from rest_framework.views import APIView
from rest_framework.authtoken.views import ObtainAuthToken
from .permissions import IsAdminOrReadOnly
from .serializers import *
class Speakers(viewsets.ModelViewSet):
queryset = Speaker.objects.all()
serializer_class = SpeakerSerializer
permission_classes = (IsAdminOrReadOnly, IsAuthenticated,)
class Events(viewsets.ModelViewSet):
queryset = Event.objects.all()
serializer_class = EventSerializer
permission_classes = (IsAdminOrReadOnly, IsAuthenticated,)
class Places(viewsets.ModelViewSet):
queryset = Place.objects.all()
serializer_class = PlaceSerializer
permission_classes = (IsAdminOrReadOnly, IsAuthenticated,)
class Paths(viewsets.ModelViewSet):
queryset = Path.objects.all()
serializer_class = PathSerializer
permission_classes = (IsAdminOrReadOnly, IsAuthenticated,)
class Timetables(viewsets.ModelViewSet):
queryset = Timetable.objects.all()
serializer_class = TimetableSerializer
permission_classes = (IsAdminOrReadOnly, IsAuthenticated,)
class AppData(ObjectMultipleModelAPIView):
querylist = [
{'queryset': Event.objects.all(), 'serializer_class': EventSerializer, 'label': 'events'},
{'queryset': Speaker.objects.all(), 'serializer_class': SpeakerSerializer, 'label': 'speakers'},
{'queryset': Place.objects.all(), 'serializer_class': PlaceSerializer, 'label': 'places'},
{'queryset': Path.objects.all(), 'serializer_class': PathSerializer, 'label': 'paths'},
{'queryset': Timetable.objects.all(), 'serializer_class': TimetableSerializer, 'label': 'timetables'}
]
permission_classes = (IsAdminOrReadOnly, IsAuthenticated,)
class LastUpdate(APIView):
permission_classes = (IsAdminOrReadOnly,)
def get(self, request):
queryset = sorted(
chain(Event.objects.all(), Speaker.objects.all(), Place.objects.all(),
Path.objects.all(), Timetable.objects.all()),
key=attrgetter('updated_at'),
reverse=True
)
return Response({'updated_at': queryset[0].updated_at})
class ObtainToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
# Change password so each login/password can be used only once
if not (user.username == 'test' or request.user.is_staff):
user.set_password(get_random_string(32))
user.userDetails.password_used = True
user.save()
return Response({'token': token.key})
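
# A hedged sketch of how these viewsets might be wired up in the project's
# urls.py with a DRF router. The URL prefixes below are illustrative
# assumptions, not taken from the original project configuration:
#
#     from rest_framework.routers import DefaultRouter
#     from labday_api.views import Speakers, Events, Places, Paths, Timetables
#
#     router = DefaultRouter()
#     router.register(r'speakers', Speakers)
#     router.register(r'events', Events)
#     router.register(r'places', Places)
#     router.register(r'paths', Paths)
#     router.register(r'timetables', Timetables)
#
#     urlpatterns = router.urls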
| 36.670455
| 109
| 0.718314
|
b0a43c03598107f65326fd13643e73d386dddacb
| 1,338
|
py
|
Python
|
userbot/plugins/divertente.py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | 1
|
2020-04-14T15:19:47.000Z
|
2020-04-14T15:19:47.000Z
|
userbot/plugins/divertente.py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | null | null | null |
userbot/plugins/divertente.py
|
Fregiant16/fregiantuserbot
|
6cb23022a1dfa66551c5ded1928d9fded16e0684
|
[
"MIT"
] | 2
|
2020-12-01T02:27:27.000Z
|
2022-02-16T08:32:11.000Z
|
"""Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
from userbot.utils import admin_cmd
import asyncio
@borg.on(admin_cmd(pattern="puta"))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.8
animation_ttl = range(0, 16)
input_str = event.pattern_match.group(1)
if input_str == "puta":
await event.edit(input_str)
animation_chars = [
"Shh",
"STAI",
"SEDUTA",
"PUTA🤫🤫"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 4])
@borg.on(admin_cmd(pattern="fottiti"))
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.4
animation_ttl = range(0, 101)
input_str = event.pattern_match.group(1)
if input_str == "fottiti":
await event.edit(input_str)
animation_chars = [
"🖕🏻🖕🏻🖕🏼🖕🏼🖕🏽🖕🏽🖕🏾🖕🏾🖕🏿",
"🖕🏼🖕🏼🖕🏽🖕🏽🖕🏾🖕🏾🖕🏿🖕🏿🖕🏿"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 2])
| 20.272727
| 61
| 0.579223
|
6d1ad5bde7c5e3ed4ddb6d9dd0346fbe1d77df59
| 357
|
py
|
Python
|
src/services/recommender_from_ratings/__init__.py
|
Asa-Nisi-Masa/movie-recommender
|
92752aa302e166885ab3e39b3e9d2ac9185a013e
|
[
"MIT"
] | null | null | null |
src/services/recommender_from_ratings/__init__.py
|
Asa-Nisi-Masa/movie-recommender
|
92752aa302e166885ab3e39b3e9d2ac9185a013e
|
[
"MIT"
] | null | null | null |
src/services/recommender_from_ratings/__init__.py
|
Asa-Nisi-Masa/movie-recommender
|
92752aa302e166885ab3e39b3e9d2ac9185a013e
|
[
"MIT"
] | null | null | null |
from src.services.user_ratings_manager import user_ratings_manager
from src.movie_model import movie_model
from src.services.movie_info_provider import movie_info_provider
from .recommender_from_ratings import RecommenderFromRatings
recommender_from_ratings = RecommenderFromRatings(
user_ratings_manager,
movie_model,
movie_info_provider,
)
| 27.461538
| 66
| 0.859944
|
40ed32d7ab06bd0c4464fc0f1111498dd7d02ab3
| 9,826
|
py
|
Python
|
test/unit/common/middleware/crypto/test_keymaster.py
|
larsbutler/swift
|
216d68eaa861b0607f1a05828f757f19cb8e6b64
|
[
"Apache-2.0"
] | null | null | null |
test/unit/common/middleware/crypto/test_keymaster.py
|
larsbutler/swift
|
216d68eaa861b0607f1a05828f757f19cb8e6b64
|
[
"Apache-2.0"
] | null | null | null |
test/unit/common/middleware/crypto/test_keymaster.py
|
larsbutler/swift
|
216d68eaa861b0607f1a05828f757f19cb8e6b64
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import mock
import unittest
from swift.common import swob
from swift.common.middleware.crypto import keymaster
from swift.common.middleware.crypto.crypto_utils import CRYPTO_KEY_CALLBACK
from swift.common.swob import Request
from test.unit.common.middleware.helpers import FakeSwift, FakeAppThatExcepts
from test.unit.common.middleware.crypto.crypto_helpers import (
TEST_KEYMASTER_CONF)
def capture_start_response():
calls = []
def start_response(*args):
calls.append(args)
return start_response, calls
class TestKeymaster(unittest.TestCase):
def setUp(self):
super(TestKeymaster, self).setUp()
self.swift = FakeSwift()
self.app = keymaster.KeyMaster(self.swift, TEST_KEYMASTER_CONF)
def test_object_path(self):
self.verify_keys_for_path(
'/a/c/o', expected_keys=('object', 'container'))
def test_container_path(self):
self.verify_keys_for_path(
'/a/c', expected_keys=('container',))
def verify_keys_for_path(self, path, expected_keys):
put_keys = None
for method, resp_class, status in (
('PUT', swob.HTTPCreated, '201'),
('POST', swob.HTTPAccepted, '202'),
('GET', swob.HTTPOk, '200'),
('HEAD', swob.HTTPNoContent, '204')):
resp_headers = {}
self.swift.register(
method, '/v1' + path, resp_class, resp_headers, '')
req = Request.blank(
'/v1' + path, environ={'REQUEST_METHOD': method})
start_response, calls = capture_start_response()
self.app(req.environ, start_response)
self.assertEqual(1, len(calls))
self.assertTrue(calls[0][0].startswith(status))
self.assertNotIn('swift.crypto.override', req.environ)
self.assertIn(CRYPTO_KEY_CALLBACK, req.environ,
'%s not set in env' % CRYPTO_KEY_CALLBACK)
keys = req.environ.get(CRYPTO_KEY_CALLBACK)()
self.assertIn('id', keys)
id = keys.pop('id')
self.assertEqual(path, id['path'])
self.assertEqual('1', id['v'])
self.assertListEqual(sorted(expected_keys), sorted(keys.keys()),
'%s %s got keys %r, but expected %r'
% (method, path, keys.keys(), expected_keys))
if put_keys is not None:
# check all key sets were consistent for this path
self.assertDictEqual(put_keys, keys)
else:
put_keys = keys
return put_keys
def test_key_uniqueness(self):
# a rudimentary check that different keys are made for different paths
ref_path_parts = ('a1', 'c1', 'o1')
path = '/' + '/'.join(ref_path_parts)
ref_keys = self.verify_keys_for_path(
path, expected_keys=('object', 'container'))
# for same path and for each differing path check that keys are unique
# when path to object or container is unique and vice-versa
for path_parts in [(a, c, o) for a in ('a1', 'a2')
for c in ('c1', 'c2')
for o in ('o1', 'o2')]:
path = '/' + '/'.join(path_parts)
keys = self.verify_keys_for_path(
path, expected_keys=('object', 'container'))
# object keys should only be equal when complete paths are equal
self.assertEqual(path_parts == ref_path_parts,
keys['object'] == ref_keys['object'],
'Path %s keys:\n%s\npath %s keys\n%s' %
(ref_path_parts, ref_keys, path_parts, keys))
# container keys should only be equal when paths to container are
# equal
self.assertEqual(path_parts[:2] == ref_path_parts[:2],
keys['container'] == ref_keys['container'],
'Path %s keys:\n%s\npath %s keys\n%s' %
(ref_path_parts, ref_keys, path_parts, keys))
def test_filter(self):
factory = keymaster.filter_factory(TEST_KEYMASTER_CONF)
self.assertTrue(callable(factory))
self.assertTrue(callable(factory(self.swift)))
def test_app_exception(self):
app = keymaster.KeyMaster(
FakeAppThatExcepts(), TEST_KEYMASTER_CONF)
req = Request.blank('/', environ={'REQUEST_METHOD': 'PUT'})
start_response, _ = capture_start_response()
self.assertRaises(Exception, app, req.environ, start_response)
def test_root_secret(self):
for secret in (os.urandom(32), os.urandom(33), os.urandom(50)):
encoded_secret = base64.b64encode(secret)
for conf_val in (bytes(encoded_secret), unicode(encoded_secret),
encoded_secret[:30] + '\n' + encoded_secret[30:]):
try:
app = keymaster.KeyMaster(
self.swift, {'encryption_root_secret': conf_val,
'encryption_root_secret_path': ''})
self.assertEqual(secret, app.root_secret)
except AssertionError as err:
self.fail(str(err) + ' for secret %r' % conf_val)
@mock.patch('swift.common.middleware.crypto.keymaster.readconf')
def test_keymaster_config_path(self, mock_readconf):
for secret in (os.urandom(32), os.urandom(33), os.urandom(50)):
enc_secret = base64.b64encode(secret)
for conf_val in (bytes(enc_secret), unicode(enc_secret),
enc_secret[:30] + '\n' + enc_secret[30:],
enc_secret[:30] + '\r\n' + enc_secret[30:]):
for ignored_secret in ('invalid! but ignored!',
'xValidButIgnored' * 10):
mock_readconf.reset_mock()
mock_readconf.return_value = {
'encryption_root_secret': conf_val}
app = keymaster.KeyMaster(self.swift, {
'keymaster_config_path': '/some/path'})
try:
self.assertEqual(secret, app.root_secret)
self.assertEqual(mock_readconf.mock_calls, [
mock.call('/some/path', 'keymaster')])
except AssertionError as err:
self.fail(str(err) + ' for secret %r' % secret)
def test_invalid_root_secret(self):
for secret in (bytes(base64.b64encode(os.urandom(31))), # too short
unicode(base64.b64encode(os.urandom(31))),
u'a' * 44 + u'????', b'a' * 44 + b'????', # not base64
u'a' * 45, b'a' * 45, # bad padding
99, None):
conf = {'encryption_root_secret': secret}
try:
with self.assertRaises(ValueError) as err:
keymaster.KeyMaster(self.swift, conf)
self.assertEqual(
'encryption_root_secret option in proxy-server.conf '
'must be a base64 encoding of at least 32 raw bytes',
err.exception.message)
except AssertionError as err:
self.fail(str(err) + ' for conf %s' % str(conf))
@mock.patch('swift.common.middleware.crypto.keymaster.readconf')
def test_root_secret_path_invalid_secret(self, mock_readconf):
for secret in (bytes(base64.b64encode(os.urandom(31))), # too short
unicode(base64.b64encode(os.urandom(31))),
u'a' * 44 + u'????', b'a' * 44 + b'????', # not base64
u'a' * 45, b'a' * 45, # bad padding
99, None):
mock_readconf.reset_mock()
mock_readconf.return_value = {'encryption_root_secret': secret}
try:
with self.assertRaises(ValueError) as err:
keymaster.KeyMaster(self.swift, {
'keymaster_config_path': '/some/other/path'})
self.assertEqual(
'encryption_root_secret option in /some/other/path '
'must be a base64 encoding of at least 32 raw bytes',
err.exception.message)
self.assertEqual(mock_readconf.mock_calls, [
mock.call('/some/other/path', 'keymaster')])
except AssertionError as err:
self.fail(str(err) + ' for secret %r' % secret)
def test_can_only_configure_secret_in_one_place(self):
conf = {'encryption_root_secret': 'a' * 44,
'keymaster_config_path': '/ets/swift/keymaster.conf'}
with self.assertRaises(ValueError) as err:
keymaster.KeyMaster(self.swift, conf)
self.assertEqual('keymaster_config_path is set, but there are '
'other config options specified!',
err.exception.message)
if __name__ == '__main__':
unittest.main()
| 45.915888
| 79
| 0.567067
|
138e04aea5679cb7ef0ca1e4dda81fece8a17249
| 455
|
py
|
Python
|
ExcelIO/Csv2Excel3.py
|
koichi210/Python
|
9bc0be009bec15499540c1bf9ae802ffe1acfe10
|
[
"MIT"
] | null | null | null |
ExcelIO/Csv2Excel3.py
|
koichi210/Python
|
9bc0be009bec15499540c1bf9ae802ffe1acfe10
|
[
"MIT"
] | null | null | null |
ExcelIO/Csv2Excel3.py
|
koichi210/Python
|
9bc0be009bec15499540c1bf9ae802ffe1acfe10
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#Csv2Excel_append
import csv
import openpyxl
TemplateExcelName = './Sample/Template.xlsx'
ReadCsvName = './Sample/SourceData.csv'
WriteExcelName = './Sample/Template_new.xlsx'
# Open the template workbook
WorkBook = openpyxl.load_workbook(TemplateExcelName)
WorkSheet = WorkBook.active
f = open(ReadCsvName)
reader = csv.reader(f, delimiter=',')
for row in reader:
    WorkSheet.append(row)
f.close()
WorkBook.save(WriteExcelName)
WorkBook.close()
| 20.681818
| 52
| 0.742857
|
cac561d4eaecbb78e4ef7548d4118e05b7cd52b7
| 10,646
|
py
|
Python
|
src/protect/mutation_calling/common.py
|
banilmohammed/protect-1
|
24fe7a3829889fdde814003c467fc54d0387245b
|
[
"Apache-2.0"
] | null | null | null |
src/protect/mutation_calling/common.py
|
banilmohammed/protect-1
|
24fe7a3829889fdde814003c467fc54d0387245b
|
[
"Apache-2.0"
] | 3
|
2020-08-23T21:54:20.000Z
|
2020-10-19T22:46:41.000Z
|
src/protect/mutation_calling/common.py
|
Dranion/protect
|
67686040a64ca98e81482d93d39f6df4323b0bd8
|
[
"Apache-2.0"
] | 1
|
2022-03-16T21:08:07.000Z
|
2022-03-16T21:08:07.000Z
|
#!/usr/bin/env python3
# Copyright 2016 UCSC Computational Genomics Lab
# Original contributor: Arjun Arkal Rao
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from protect.common import chrom_sorted, export_results, get_files_from_filestore, untargz
import itertools
import os
def sample_chromosomes(job, genome_fai_file):
"""
Get a list of chromosomes in the input data.
:param toil.fileStore.FileID genome_fai_file: Job store file ID for the genome fai file
:return: Chromosomes in the sample
:rtype: list[str]
"""
work_dir = os.getcwd()
genome_fai = untargz(job.fileStore.readGlobalFile(genome_fai_file), work_dir)
return chromosomes_from_fai(genome_fai)
def chromosomes_from_fai(genome_fai):
"""
Read a fasta index (fai) file and parse the input chromosomes.
:param str genome_fai: Path to the fai file.
:return: list of input chromosomes
:rtype: list[str]
"""
chromosomes = []
with open(genome_fai) as fai_file:
for line in fai_file:
line = line.strip().split()
chromosomes.append(line[0])
return chromosomes
def run_mutation_aggregator(job, mutation_results, univ_options):
"""
Aggregate all the called mutations.
:param dict mutation_results: Dict of dicts of the various mutation callers in a per chromosome
format
:param dict univ_options: Dict of universal options used by almost all tools
:returns: fsID for the merged mutations file
:rtype: toil.fileStore.FileID
"""
# Setup an input data structure for the merge function
out = {}
for chrom in list(mutation_results['mutect'].keys()):
out[chrom] = job.addChildJobFn(merge_perchrom_mutations, chrom, mutation_results,
univ_options).rv()
merged_snvs = job.addFollowOnJobFn(merge_perchrom_vcfs, out, 'merged', univ_options)
job.fileStore.logToMaster('Aggregated mutations for %s successfully' % univ_options['patient'])
return merged_snvs.rv()
def merge_perchrom_mutations(job, chrom, mutations, univ_options):
"""
Merge the mutation calls for a single chromosome.
:param str chrom: Chromosome to process
:param dict mutations: dict of dicts of the various mutation caller names as keys, and a dict of
per chromosome job store ids for vcfs as value
:param dict univ_options: Dict of universal options used by almost all tools
    :returns: fsID for vcf containing merged calls for the given chromosome
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
from protect.mutation_calling.muse import process_muse_vcf
from protect.mutation_calling.mutect import process_mutect_vcf
from protect.mutation_calling.radia import process_radia_vcf
from protect.mutation_calling.somaticsniper import process_somaticsniper_vcf
from protect.mutation_calling.strelka import process_strelka_vcf
mutations.pop('indels')
mutations['strelka_indels'] = mutations['strelka']['indels']
mutations['strelka_snvs'] = mutations['strelka']['snvs']
vcf_processor = {'snvs': {'mutect': process_mutect_vcf,
'muse': process_muse_vcf,
'radia': process_radia_vcf,
'somaticsniper': process_somaticsniper_vcf,
'strelka_snvs': process_strelka_vcf
},
'indels': {'strelka_indels': process_strelka_vcf
}
}
# 'fusions': lambda x: None,
# 'indels': lambda x: None}
# For now, let's just say 2 out of n need to call it.
# num_preds = len(mutations)
# majority = int((num_preds + 0.5) / 2)
majority = {'snvs': 2,
'indels': 1}
accepted_hits = defaultdict(dict)
for mut_type in list(vcf_processor.keys()):
# Get input files
perchrom_mutations = {caller: vcf_processor[mut_type][caller](job, mutations[caller][chrom],
work_dir, univ_options)
for caller in vcf_processor[mut_type]}
# Process the strelka key
perchrom_mutations['strelka'] = perchrom_mutations['strelka_' + mut_type]
perchrom_mutations.pop('strelka_' + mut_type)
# Read in each file to a dict
vcf_lists = {caller: read_vcf(vcf_file) for caller, vcf_file in list(perchrom_mutations.items())}
all_positions = list(set(itertools.chain(*list(vcf_lists.values()))))
for position in sorted(all_positions):
hits = {caller: position in vcf_lists[caller] for caller in list(perchrom_mutations.keys())}
if sum(hits.values()) >= majority[mut_type]:
callers = ','.join([caller for caller, hit in list(hits.items()) if hit])
assert position[1] not in accepted_hits[position[0]]
accepted_hits[position[0]][position[1]] = (position[2], position[3], callers)
with open(''.join([work_dir, '/', chrom, '.vcf']), 'w') as outfile:
print('##fileformat=VCFv4.0', file=outfile)
print('##INFO=<ID=callers,Number=.,Type=String,Description=List of supporting callers.',
file=outfile)
print('#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO', file=outfile)
for chrom in chrom_sorted(list(accepted_hits.keys())):
for position in sorted(accepted_hits[chrom]):
print(chrom, position, '.', accepted_hits[chrom][position][0],
accepted_hits[chrom][position][1], '.', 'PASS',
'callers=' + accepted_hits[chrom][position][2], sep='\t', file=outfile)
fsid = job.fileStore.writeGlobalFile(outfile.name)
export_results(job, fsid, outfile.name, univ_options, subfolder='mutations/merged')
return fsid
def read_vcf(vcf_file):
"""
Read a vcf file to a dict of lists.
:param str vcf_file: Path to a vcf file.
:return: dict of lists of vcf records
:rtype: dict
"""
vcf_dict = []
with open(vcf_file, 'r') as invcf:
for line in invcf:
if line.startswith('#'):
continue
line = line.strip().split()
vcf_dict.append((line[0], line[1], line[3], line[4]))
return vcf_dict
def merge_perchrom_vcfs(job, perchrom_vcfs, tool_name, univ_options):
"""
Merge per-chromosome vcf files into a single genome level vcf.
:param dict perchrom_vcfs: Dictionary with chromosome name as key and fsID of the corresponding
vcf as value
:param str tool_name: Name of the tool that generated the vcfs
:returns: fsID for the merged vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {''.join([chrom, '.vcf']): jsid for chrom, jsid in list(perchrom_vcfs.items())}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
first = True
with open(''.join([work_dir, '/', 'all_merged.vcf']), 'w') as outvcf:
for chromvcfname in chrom_sorted([x.rstrip('.vcf') for x in list(input_files.keys())]):
with open(input_files[chromvcfname + '.vcf'], 'r') as infile:
for line in infile:
line = line.strip()
if line.startswith('#'):
if first:
print(line, file=outvcf)
continue
first = False
print(line, file=outvcf)
output_file = job.fileStore.writeGlobalFile(outvcf.name)
export_results(job, output_file, outvcf.name, univ_options, subfolder='mutations/' + tool_name)
job.fileStore.logToMaster('Ran merge_perchrom_vcfs for %s successfully' % tool_name)
return output_file
def unmerge(job, input_vcf, tool_name, chromosomes, tool_options, univ_options):
"""
Un-merge a vcf file into per-chromosome vcfs.
:param str input_vcf: Input vcf
:param str tool_name: The name of the mutation caller
:param list chromosomes: List of chromosomes to retain
:param dict tool_options: Options specific to the mutation caller
:param dict univ_options: Dict of universal options used by almost all tools
:return: dict of fsIDs, one for each chromosomal vcf
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'input.vcf': input_vcf,
'genome.fa.fai.tar.gz': tool_options['genome_fai']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['genome.fa.fai'] = untargz(input_files['genome.fa.fai.tar.gz'], work_dir)
read_chromosomes = defaultdict()
with open(input_files['input.vcf'], 'r') as in_vcf:
header = []
for line in in_vcf:
if line.startswith('#'):
header.append(line)
continue
line = line.strip()
chrom = line.split()[0]
if chrom in read_chromosomes:
print(line, file=read_chromosomes[chrom])
else:
read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
print(''.join(header), file=read_chromosomes[chrom], end='')
print(line, file=read_chromosomes[chrom])
# Process chromosomes that had no mutations
for chrom in set(chromosomes).difference(set(read_chromosomes.keys())):
read_chromosomes[chrom] = open(os.path.join(os.getcwd(), chrom + '.vcf'), 'w')
print(''.join(header), file=read_chromosomes[chrom], end='')
outdict = {}
chroms = set(chromosomes).intersection(set(read_chromosomes.keys()))
for chrom, chromvcf in list(read_chromosomes.items()):
chromvcf.close()
if chrom not in chroms:
continue
outdict[chrom] = job.fileStore.writeGlobalFile(chromvcf.name)
export_results(job, outdict[chrom], chromvcf.name, univ_options,
subfolder='mutations/' + tool_name)
return outdict
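# Illustrative sketch only: one way the two helpers above could be chained in a Toil
# workflow. The tool name 'mutect' and the job/perchrom_vcfs/tool_options/univ_options
# objects are assumptions standing in for whatever the surrounding pipeline supplies.
def _example_merge_then_unmerge(job, perchrom_vcfs, chromosomes, tool_options, univ_options):
    # merge the per-chromosome calls, then split the merged vcf back out per chromosome
    merged = job.addChildJobFn(merge_perchrom_vcfs, perchrom_vcfs, 'mutect', univ_options)
    split = merged.addChildJobFn(unmerge, merged.rv(), 'mutect', chromosomes, tool_options,
                                 univ_options)
    return split.rv()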
| 43.631148
| 105
| 0.642495
|
1969f2eccb1c2ecfe2b89e9296bf3e189187e1bd
| 829
|
py
|
Python
|
examples/undocumented/python/kernel_multiquadric.py
|
cloner1984/shogun
|
901c04b2c6550918acf0594ef8afeb5dcd840a7d
|
[
"BSD-3-Clause"
] | 1
|
2019-03-21T02:03:54.000Z
|
2019-03-21T02:03:54.000Z
|
examples/undocumented/python/kernel_multiquadric.py
|
cloner1984/shogun
|
901c04b2c6550918acf0594ef8afeb5dcd840a7d
|
[
"BSD-3-Clause"
] | null | null | null |
examples/undocumented/python/kernel_multiquadric.py
|
cloner1984/shogun
|
901c04b2c6550918acf0594ef8afeb5dcd840a7d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import shogun as sg
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list=[[traindat,testdat, 1.0],[traindat,testdat, 5.0]]
def kernel_multiquadric(train_fname=traindat, test_fname=testdat, shift_coef=1.0):
from shogun import RealFeatures, kernel, distance, CSVFile
feats_train=RealFeatures(CSVFile(train_fname))
feats_test=RealFeatures(CSVFile(test_fname))
distance = sg.distance('EuclideanDistance')
kernel = sg.kernel('MultiquadricKernel', coef=shift_coef,
distance=distance)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Multiquadric')
kernel_multiquadric(*parameter_list[0])
| 27.633333
| 82
| 0.782871
|
9801b77fffc1825a1021844840d1ae3ca5062fb1
| 8,115
|
py
|
Python
|
utilities/utilities.py
|
yazdanv/backend
|
49da8d46e108bc2000fdabc1b991836f2cc50687
|
[
"MIT"
] | 2
|
2021-06-11T21:41:05.000Z
|
2021-06-16T03:58:16.000Z
|
utilities/utilities.py
|
AienTech/backend
|
a1c73e781e571e4a3ec0bc9598df44ccd876cf3c
|
[
"MIT"
] | null | null | null |
utilities/utilities.py
|
AienTech/backend
|
a1c73e781e571e4a3ec0bc9598df44ccd876cf3c
|
[
"MIT"
] | 1
|
2021-05-10T04:40:22.000Z
|
2021-05-10T04:40:22.000Z
|
import coreapi
import magic
import string
import random
import requests
import re
import uuid
from datetime import timedelta
from django.core.validators import EmailValidator, ValidationError
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.core.mail import send_mail
from django.contrib.sites.shortcuts import get_current_site
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils import six
from django.utils.translation import ugettext as _
from rest_framework.schemas import AutoSchema
from rest_framework import serializers
from utilities import exceptions
from config.utilities import get_int_config_value
def jwt_response_payload_handler(token, user):
"""
Returns the response data for both the login and refresh views.
Override to return a custom response such as including the
serialized representation of the User.
Example:
def jwt_response_payload_handler(token, user=None, request=None):
return {
'token': token,
            'user': UserSerializer(user, context={'request': request}).data
}
"""
company = user.company_set.all()
if company:
company = {'company_slug': company[0].company_slug, 'id': company[0].id}
else:
company = None
return {
'token': token,
'user': {
'first_name': user.first_name,
'last_name': user.last_name,
'email': user.profile.email,
'email_confirmed': user.profile.email_confirmed,
'user_name': user.username,
'nick_name': user.profile.nick_name,
'profile_image': user.profile.profile_image,
'admin_of_company': company,
},
}
def forgot_password_delete_account_token_generator():
all_char = string.digits + string.ascii_uppercase
return "".join(random.choice(all_char) for x in range(random.randint(8, 8)))
def check_email_or_username(email_username):
validator = EmailValidator()
try:
validator(email_username)
return settings.EMAIL_USERNAME['EMAIL']
except ValidationError:
return settings.EMAIL_USERNAME['USERNAME']
def jwt_get_secret_key(user):
"""
    Use this when generating and checking JWT tokens.
    On logout the jwt_secret changes, so previously issued JWT tokens are invalidated.
    :param user:
    :return: the per-user JWT secret
"""
return user.profile.jwt_secret
class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
"""
Email verification code
"""
def _make_hash_value(self, user, timestamp):
return (
six.text_type(user.pk) + six.text_type(timestamp) +
six.text_type(user.profile.email_confirmed)
)
account_activation_token = AccountActivationTokenGenerator()
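# Note (added for clarity): because the hash above mixes in profile.email_confirmed,
# an activation token issued before confirmation stops validating once that flag flips.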
def send_email_confirm(user, request):
current_site = get_current_site(request)
    subject = 'تایید عضویت JobGuy'  # "JobGuy membership confirmation"
message = render_to_string('email_confirm.html', {
'user': user,
'domain': current_site.domain,
'uid': urlsafe_base64_encode(force_bytes(user.pk)).decode(),
'token': account_activation_token.make_token(user),
})
resp = send_mail(subject=subject, message='', from_email=None, recipient_list=[user.email], html_message=message)
def send_password_forget_token_email(user, request, forgot_password_token):
current_site = get_current_site(request)
    subject = 'فراموشی رمز عبور'  # "Forgot password"
message = render_to_string('forgot_password_token.html', {
'user_name': user.first_name + ' ' + user.last_name,
'domain': current_site.domain,
'token': forgot_password_token,
})
send_mail(subject=subject, message='', from_email=None, recipient_list=[user.email], html_message=message)
def check_file_exist(path):
resp = requests.post('https://upload.jobguy.work/validate/', json={'path': path})
if resp.status_code == 404:
raise serializers.ValidationError(_('File does not exist'))
elif resp.status_code == 200:
return
else:
raise serializers.ValidationError(_('There is an error with media server connection...'))
CUSTOM_PAGINATION_SCHEMA = AutoSchema(manual_fields=[
coreapi.Field("index", required=False, location="query", type="integer", description="pagination index"),
coreapi.Field("size", required=False, location="query", type="integer", description="pagination size"),
coreapi.Field("order_by", required=False, location="query", type="string", description="sort list"),
])
# custom schema used in swgger to add custom field
CUSTOM_UPLOAD_SCHEMA = AutoSchema(manual_fields=[
coreapi.Field("file", required=False, location="form", type="file", description="upload file")])
def slug_helper(text):
    return '-'.join(re.findall(r'[\w-]+', text)).lower()
def is_slug(text):
slugged_text = slug_helper(text)
if slugged_text == text:
return True
else:
return False
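# Illustrative behaviour (examples added for clarity, not from the original source):
#   slug_helper('Hello World!')  -> 'hello-world'
#   is_slug('hello-world')       -> True
#   is_slug('Hello World!')      -> False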
def check_slug_available(model, key, slug_base):
data = {key: slug_base}
i = 0
while model.objects.filter(**data):
i += 1
data = {key: slug_base + '_{}'.format(i)}
if i == 0:
return slug_base
else:
return slug_base + '_{}'.format(i)
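# Usage sketch (the Company model and its 'company_slug' field are assumed, not defined here):
#   check_slug_available(Company, 'company_slug', 'acme')
#   -> 'acme' if unused, otherwise the first free 'acme_1', 'acme_2', ...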
def file_uploader(user_uid, path):
if path:
resp = requests.post('https://upload.jobguy.work/download/', json={'url': path, 'uid': user_uid})
if resp.status_code == 200:
return resp.json()['path']
else:
raise serializers.ValidationError(_('There is an error with media server connection...'))
else:
return '/user/2e8d9375bbdb401e46d2251c71752b10/image_2019_3_2_11_914343084.jpeg'
def file_check_name(user_name, file, slug):
file.seek(0)
file_mime = magic.from_buffer(file.read(), mime=True)
file.seek(0)
file_data = {
'uploadfile': (''.join(file.name.split('.')[:-1]), file.read(), file_mime),
}
data = {
'slug': slug,
'token': settings.MEDIA_UPLOAD_TOKEN,
}
resp = requests.post(settings.MEDIA_UPLOAD_PATH, files=file_data, data=data)
if resp.status_code == 200:
return resp.json()['path']
raise serializers.ValidationError(_('There is an error with media server connection...'))
def telegram_notify(content, id=None, type=None, title=None, body=None, name=None):
try:
resp = requests.post('https://bot.jobguy.work/api/message', headers={'token': settings.TELEGRAM_BOT_TOKEN},
json={'content': content, 'id': id, 'type': type, 'title': title,
'body': body, 'name': name})
except Exception as e:
pass
def telegram_notify_channel(content):
try:
resp = requests.post('https://bot.jobguy.work/api/public/message',
headers={'token': settings.TELEGRAM_BOT_TOKEN}, json={'content': content})
except Exception as e:
pass
def uuid_str():
return ''.join(str(uuid.uuid4()).split('-'))
def check_vote_status(instance, user):
if user in instance.vote.all():
return 'UP'
elif user in instance.down_vote.all():
return 'DOWN'
else:
return 'NONE'
def get_client_ip(request):
"""
get client ip from request
:param request:
:return:
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
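# Example (illustrative addresses): with HTTP_X_FORWARDED_FOR = '203.0.113.7, 10.0.0.1'
# the left-most address '203.0.113.7' is returned; without the header the function
# falls back to REMOTE_ADDR.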
def back_months_by_3(publish_time, end_time):
for i in range(8, 0, -1):
start_time_period = end_time - timedelta(days=i*90)
start_time_period2 = end_time - timedelta(days=(i-1)*90)
if start_time_period <= publish_time < start_time_period2:
return i
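# Worked example (added for clarity): the return value is the index of the 90-day
# window, counted back from end_time, that contains publish_time (1 = most recent,
# 8 = oldest). A publish_time 100 days before end_time falls between end_time-180 and
# end_time-90, so 2 is returned; anything older than 720 days falls through and yields None.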
def avg_by_key(lis, key):
    count = len(lis)
    total = 0  # avoid shadowing the built-in sum()
    for item in lis:
        total += item[key]
    return count, round((total / count) / 1000000, 1)
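# Worked example (hypothetical salary figures): avg_by_key([{'salary': 2500000},
# {'salary': 3500000}], 'salary') returns (2, 3.0), i.e. the item count and the mean
# divided by 1,000,000, rounded to one decimal place.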
| 31.453488
| 117
| 0.67024
|
63ff07af800be41ec071be37f16fb64ab288b715
| 477
|
py
|
Python
|
freight_forwarder/const.py
|
TUNE-Archive/freight_forwarder
|
6ea4a49f474ec04abb8bb81b175c774a16b5312f
|
[
"MIT"
] | null | null | null |
freight_forwarder/const.py
|
TUNE-Archive/freight_forwarder
|
6ea4a49f474ec04abb8bb81b175c774a16b5312f
|
[
"MIT"
] | null | null | null |
freight_forwarder/const.py
|
TUNE-Archive/freight_forwarder
|
6ea4a49f474ec04abb8bb81b175c774a16b5312f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8; -*-
from __future__ import unicode_literals
VERSION = "1.0.3-dev"
# docker api
DOCKER_DEFAULT_TIMEOUT = 120
DOCKER_API_VERSION = '1.20'
# docker labels
PROJECT_LABEL = 'com.freight-forwarder.project'
TEAM_LABEL = 'com.freight-forwarder.team'
VERSION_LABEL = 'com.freight-forwarder.version'
GIT_LABEL = 'com.freight-forwarder.git_sha'
TYPE_LABEL = 'com.freight-forwarder.type'
TIMESTAMP_LABEL = 'com.freight-forwarder.time_stamp'
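# Illustrative only (the docker-py client and the label values are assumptions, not part
# of this module): these keys are intended to be attached as image labels, e.g.
#   client.images.build(path='.', labels={PROJECT_LABEL: 'api', TEAM_LABEL: 'core',
#                                          VERSION_LABEL: VERSION})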
| 28.058824
| 52
| 0.731656
|
4682080f4e8b9bbd96439d0554dcad3e51a07b8f
| 4,995
|
py
|
Python
|
qa/rpc-tests/wallet-hd.py
|
sanatorium/sanity
|
9f6e64ff94cf9a26406d83266d3e2f47da1213bc
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/wallet-hd.py
|
sanatorium/sanity
|
9f6e64ff94cf9a26406d83266d3e2f47da1213bc
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/wallet-hd.py
|
sanatorium/sanity
|
9f6e64ff94cf9a26406d83266d3e2f47da1213bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Hierarchical Deterministic wallet function."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletHDTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = start_nodes(2, self.options.tmpdir, [['-usehd=0'], ['-usehd=1', '-keypool=0']])
        self.is_network_split = False
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
    def run_test(self):
tmpdir = self.options.tmpdir
# Make sure can't switch off usehd after wallet creation
stop_node(self.nodes[1],1)
try:
start_node(1, self.options.tmpdir, ['-usehd=0'])
raise AssertionError("Must not allow to turn off HD on an already existing HD wallet")
except Exception as e:
assert("sanityd exited with status 1 during initialization" in str(e))
# assert_start_raises_init_error(1, self.options.tmpdir, ['-usehd=0'], 'already existing HD wallet')
# self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
connect_nodes_bi(self.nodes, 0, 1)
# Make sure we use hd, keep chainid
chainid = self.nodes[1].getwalletinfo()['hdchainid']
assert_equal(len(chainid), 64)
# create an internal key
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/0") #first internal child key
# Import a non-HD private key in the HD wallet
non_hd_add = self.nodes[0].getnewaddress()
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
# This should be enough to keep the master key and the non-HD key
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
# Derive some HD addresses and remember the last
# Also send funds to each add
self.nodes[0].generate(101)
hd_add = None
num_hd_adds = 300
for i in range(num_hd_adds):
hd_add = self.nodes[1].getnewaddress()
hd_info = self.nodes[1].validateaddress(hd_add)
assert_equal(hd_info["hdkeypath"], "m/44'/1'/0'/0/"+str(i+1))
assert_equal(hd_info["hdchainid"], chainid)
self.nodes[0].sendtoaddress(hd_add, 1)
self.nodes[0].generate(1)
self.nodes[0].sendtoaddress(non_hd_add, 1)
self.nodes[0].generate(1)
# create an internal key (again)
change_addr = self.nodes[1].getrawchangeaddress()
        change_addrV = self.nodes[1].validateaddress(change_addr)
assert_equal(change_addrV["hdkeypath"], "m/44'/1'/0'/1/1") #second internal child key
self.sync_all()
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
print("Restore backup ...")
stop_node(self.nodes[1],1)
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0'])
#connect_nodes_bi(self.nodes, 0, 1)
# Assert that derivation is deterministic
hd_add_2 = None
for _ in range(num_hd_adds):
hd_add_2 = self.nodes[1].getnewaddress()
hd_info_2 = self.nodes[1].validateaddress(hd_add_2)
assert_equal(hd_info_2["hdkeypath"], "m/44'/1'/0'/0/"+str(_+1))
assert_equal(hd_info_2["hdchainid"], chainid)
assert_equal(hd_add, hd_add_2)
# Needs rescan
stop_node(self.nodes[1],1)
self.nodes[1] = start_node(1, self.options.tmpdir, ['-usehd=1', '-keypool=0', '-rescan'])
#connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[1].getbalance(), num_hd_adds + 1)
# send a tx and make sure its using the internal chain for the changeoutput
txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
outs = self.nodes[1].decoderawtransaction(self.nodes[1].gettransaction(txid)['hex'])['vout'];
keypath = ""
for out in outs:
if out['value'] != 1:
keypath = self.nodes[1].validateaddress(out['scriptPubKey']['addresses'][0])['hdkeypath']
assert_equal(keypath[0:13], "m/44'/1'/0'/1")
if __name__ == '__main__':
    WalletHDTest().main()
| 43.815789
| 108
| 0.638839
|
84c2d417623e50060e10115db07d0271c945c13f
| 10,813
|
py
|
Python
|
jsonhandler.py
|
LaDane/Gamehelper
|
55357046471ca8eb560a787b52fd5cbf450d6697
|
[
"MIT"
] | null | null | null |
jsonhandler.py
|
LaDane/Gamehelper
|
55357046471ca8eb560a787b52fd5cbf450d6697
|
[
"MIT"
] | null | null | null |
jsonhandler.py
|
LaDane/Gamehelper
|
55357046471ca8eb560a787b52fd5cbf450d6697
|
[
"MIT"
] | null | null | null |
import json
import os
from filehandler import FileHandler
fh = FileHandler()
class JsonHandler:
def load_data(self):
self.worlditems = fh.load_file('worlditems')
self.currency = fh.load_file('currency')
self.shops = fh.load_file('shops')
self.charactersheet = fh.load_file('charactersheet')
self.characterinventory = fh.load_file('characterinventory')
self.inventory = fh.load_file('inventory')
def show_shop_titles(self): # This function is for when you would like to display the titles in shops.json
self.load_data()
print_str = ""
for title, value in self.shops.items():
print_str += f"{title}\n"
_ = value # this sets it to nothing and we have no problems in code
return print_str
def show_shop_stockid(self): # This function is for when you would like to display shop stock id in shops.json
self.load_data()
print_str = ""
for title, value in self.shops.items():
print_str += f"{title} -\n"
for sid, items in value['Stock'].items():
print_str += f" **{sid}**\n"
_ = items # this sets it to nothing and we have no problems in code
return print_str
def show_BuyStockMsgID(self): # This function is for when you would like to display all shop BuyStockMsgID under stock in shops.json
self.load_data()
print_str = ""
for title, value in self.shops.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Stock'].items():
print_str += f"{items['BuyStockMsgID']},"
_ = sid # this sets it to nothing and we have no problems in code
return print_str
def show_BuyStockMsgEmbed(self): # This function is for when you would like to display all shop BuyStockMsgEmbed under stock in shops.json
self.load_data()
print_str = ""
for title, value in self.shops.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Stock'].items():
print_str += f"{items['BuyStockMsgEmbed']},"
_ = sid # this sets it to nothing and we have no problems in code
return print_str
def show_worlditem_titles(self): # This function is for when you would like to display the TITLES and VALUES in worlditems.json
self.load_data()
print_str = ""
for title, value in self.worlditems.items():
print_str += f"**{title}** - {value['ItemName']}\n"
# _ = value # this sets it to nothing and we have no problems in code
return print_str
def show_currency_titles_values(self): # This function is for when you would like to display the TITLES and VALUES in worlditems.json
self.load_data()
print_str = ""
for title, value in self.currency.items():
print_str += f"**{title}** - {value['Character']}\n"
# _ = value # this sets it to nothing and we have no problems in code
return print_str
def show_character_sheet_embed_ids(self): # This function is for when you would like to display all character sheet embed ID's in charactersheet.json
self.load_data()
print_str = ""
for title, value in self.charactersheet.items():
_ = title
print_str += f"{value['Character Sheet Embed ID']},"
# # print_str += f"{title} -\n"
# for sid, items in value['Stock'].items():
# print_str += f"{items['BuyStockMsgID']},"
# _ = sid # this sets it to nothing and we have no problems in code
return print_str
def show_char_inv_worldids(self): # This function is for when you would like to display all World Items ID under Inventory in characterinventory.json
self.load_data()
print_str = ""
for title, value in self.characterinventory.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Inventory'].items():
print_str += f"{items['WorldID']},"
_ = sid # this sets it to nothing and we have no problems in code
return print_str
# used to auto generate id in react buy
def show_char_inv_numbers(self): # This function is for when you would like to display all Inventory IDS in characterinventory.json
self.load_data()
print_str = ""
for title, value in self.characterinventory.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Inventory'].items():
print_str += f"{sid} "
_ = items # this sets it to nothing and we have no problems in code
return print_str
def show_shop_stockid2(self): # This function is for when you would like to display shop stock id in shops.json
self.load_data()
print_str = ""
for title, value in self.shops.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Stock'].items():
print_str += f"{sid} "
_ = items # this sets it to nothing and we have no problems in code
return print_str
def show_worlditem_titles2(self): # This function is for when you would like to display the TITLES and in worlditems.json
self.load_data()
print_str = ""
for title, value in self.worlditems.items():
print_str += f"{title} "
_ = value # this sets it to nothing and we have no problems in code
return print_str
def show_character_sheet_stats(self): # This function is for when you would like to display all character sheet stats categories in charactersheet.json
self.load_data()
print_str = ""
for title, value in self.charactersheet.items():
_ = title
for sid, items in value['Character Sheet'].items():
str_sid = str(sid)
print_str += f"{str_sid},"
_ = items # this sets it to nothing and we have no problems in code
return print_str
def show_character_inventory_embed_ids(self): # This function is for when you would like to display all the character inventory embed IDs in characterinventory.json
self.load_data()
print_str = ""
for title, value in self.characterinventory.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Inventory'].items():
print_str += f"{items['Character Inventory Embed']},"
_ = sid # this sets it to nothing and we have no problems in code
return print_str
def show_inv_ids(self): # This function is for when you would like to display all Inventory IDS in inventory.json
self.load_data()
print_str = ""
for title, value in self.inventory.items():
_ = value
print_str += f"{title},"
# for sid, items in value['Inventory'].items():
# print_str += f"{sid},"
# _ = items # this sets it to nothing and we have no problems in code
return print_str
def show_inv_titles_and_values(self): # This function is for when you would like to display the titles with the inventory names in inventory.json
self.load_data()
print_str = ""
for title, value in self.inventory.items():
print_str += f"**{title}** - {value['Inventory_Name']}\n"
return print_str
def show_inventory_store_items_channel_ids(self): # This function is for when you would like list all the store-items channel ids in inventory.json
self.load_data()
print_str = ""
for title, value in self.inventory.items():
_ = title
print_str += f"{value['Store_Items_Channel_ID']},"
return print_str
def show_shared_inv_inv_worldids(self): # This function is for when you would like to display all World Items ID under Inventory in inventory.json
self.load_data()
print_str = ""
for title, value in self.inventory.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Inventory'].items():
print_str += f"{items['WorldID']},"
_ = sid # this sets it to nothing and we have no problems in code
return print_str
def show_shared_inv_inv_numbers(self): # This function is for when you would like to display all Inventory IDS in inventory.json
self.load_data()
print_str = ""
for title, value in self.inventory.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Inventory'].items():
print_str += f"{sid} "
_ = items # this sets it to nothing and we have no problems in code
return print_str
def show_inventory_inventory_embed_ids(self): # This function is for when you would like to display all the inventory inventory embed IDs in characterinventory.json
self.load_data()
print_str = ""
for title, value in self.inventory.items():
_ = title
# print_str += f"{title} -\n"
for sid, items in value['Inventory'].items():
print_str += f"{items['Character_Inventory_Embed']},"
_ = sid # this sets it to nothing and we have no problems in code
return print_str
# Variant: drop the unused title variable and iterate over .values() only
# def show_inventory_inventory_embed_ids(self): # This function is for when you would like to display all the inventory inventory embed IDs in characterinventory.json
# self.load_data()
# print_str = ""
# for value in self.inventory.values():
# # print_str += f"{title} -\n"
# for sid, items in value['Inventory'].items():
# print_str += f"{items['Character_Inventory_Embed']},"
# _ = sid # this sets it to nothing and we have no problems in code
# return print_str
# Variant: drop the value variable (note: iterating .keys() as sketched below would not
# work, because each key is a string and has no ['Inventory'] to index into)
# def show_inventory_inventory_embed_ids(self): # This function is for when you would like to display all the inventory inventory embed IDs in characterinventory.json
# self.load_data()
# print_str = ""
# for value in self.inventory.keys():
# # print_str += f"{title} -\n"
# for sid, items in value['Inventory'].items():
# print_str += f"{items['Character_Inventory_Embed']},"
# _ = sid # this sets it to nothing and we have no problems in code
# return print_str
| 46.407725
| 170
| 0.603163
|
d7d9ae65c7b7bd3df4b2e7e1208130ca6aa15d4d
| 4,890
|
py
|
Python
|
docs/conf.py
|
mthnglac/parasut-cli
|
de2a1e69a63d5036e1486aad732ae4bc471e75dc
|
[
"MIT"
] | 2
|
2021-03-08T06:53:31.000Z
|
2021-06-06T17:12:31.000Z
|
docs/conf.py
|
mthnglac/parasut-cli
|
de2a1e69a63d5036e1486aad732ae4bc471e75dc
|
[
"MIT"
] | 17
|
2021-03-06T21:56:56.000Z
|
2022-03-12T14:09:06.000Z
|
docs/conf.py
|
mthnglac/parasut-cli
|
de2a1e69a63d5036e1486aad732ae4bc471e75dc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# parasut_cli documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import parasut_cli
import sphinx_rtd_theme
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx_rtd_theme',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Parasut CLI'
copyright = "2021, Metehan Gulac"
author = "Metehan Gulac"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = parasut_cli.__version__
# The full version, including alpha/beta/rc tags.
release = parasut_cli.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'parasut_clidoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'parasut_cli.tex',
'Parasut CLI Documentation',
'Metehan Gulac', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'parasut_cli',
'Parasut CLI Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'parasut_cli',
'Parasut CLI Documentation',
author,
'parasut_cli',
'One line description of project.',
'Miscellaneous'),
]
| 29.107143
| 77
| 0.686503
|
d30696354fa579679b3adcdf399fe7fbb0166d97
| 3,288
|
py
|
Python
|
newsCrawl/fakeNews/fakeNews/settings.py
|
ARIF-KHAN-420/Fake_News
|
acfbffcce454afc09c4a7b06205c1a632c11f822
|
[
"MIT"
] | 1
|
2022-01-03T17:54:03.000Z
|
2022-01-03T17:54:03.000Z
|
newsCrawl/fakeNews/fakeNews/settings.py
|
arifkhan-silicornya/Fake_News
|
acfbffcce454afc09c4a7b06205c1a632c11f822
|
[
"MIT"
] | null | null | null |
newsCrawl/fakeNews/fakeNews/settings.py
|
arifkhan-silicornya/Fake_News
|
acfbffcce454afc09c4a7b06205c1a632c11f822
|
[
"MIT"
] | null | null | null |
"""
Django settings for fakeNews project.
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'tgka2ma#o=^^ojorwdfvcg&h7z*hnmq6@5#s#v-w$cp3$ikx+n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Custom app
'authentication',
'index',
'actionfakeNews',
'about',
'contact',
'crawlapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fakeNews.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'fakeNews.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'assets')
#SMTP configuration
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST='smtp.gmail.com'
EMAIL_PORT= 587
EMAIL_USE_TLS= True
EMAIL_HOST_USER=''
EMAIL_HOST_PASSWORD=''
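# Quick smoke test for the SMTP settings above (recipient address is hypothetical):
#   from django.core.mail import send_mail
#   send_mail('Test subject', 'Test body', EMAIL_HOST_USER, ['someone@example.com'])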
| 23.826087
| 91
| 0.690085
|
cbec1cdb1b3985aa84e9207fef46e44f823483f4
| 5,629
|
py
|
Python
|
AirplaneReservationSystem/settings.py
|
BFlameSwift/AirplaneReservationSystem
|
bbabb0e258c72eb50fcbbf7ade437e38a39e6f02
|
[
"MIT"
] | 3
|
2021-06-19T09:40:13.000Z
|
2021-06-19T17:09:54.000Z
|
AirplaneReservationSystem/settings.py
|
BFlameSwift/AirplaneReservationSystem
|
bbabb0e258c72eb50fcbbf7ade437e38a39e6f02
|
[
"MIT"
] | null | null | null |
AirplaneReservationSystem/settings.py
|
BFlameSwift/AirplaneReservationSystem
|
bbabb0e258c72eb50fcbbf7ade437e38a39e6f02
|
[
"MIT"
] | 1
|
2021-12-05T14:51:51.000Z
|
2021-12-05T14:51:51.000Z
|
"""
Django settings for AirplaneReservationSystem project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-&ug@^cl*09rt)r179eb$&%e4jfghiae4tbp%8$*66s&+bge+t-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'corsheaders',
'login',
'background',
# 'captcha',
'kernel',
# 'rest_framework',
'rest_framework',
'rest_framework_swagger',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
APPEND_SLASH=False
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
# CORS_ORIGIN_WHITELIST = (
# '*'
# )
CORS_ORIGIN_WHITELIST = (
'http://127.0.0.1:8000',
'http://localhost:8000',
'https://unitradeprod.alipaydev.com',
'https://excashier.alipaydev.com',
)
CORS_ALLOW_METHODS = (
'GET',
'POST',
)
CORS_ALLOW_HEADERS = (
'XMLHttpRequest',
'X_FILENAME',
'accept-encoding',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
'Pragma',
)
# ALLOWED_HOSTS = ['*']
ROOT_URLCONF = 'AirplaneReservationSystem.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# 'DIRS': [BASE_DIR / 'templates']
'DIRS': [BASE_DIR/ 'front/dist'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'AirplaneReservationSystem.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': '',  # TODO Replace it with yours
        'USER': '',  # TODO Replace it with yours
        'PASSWORD': '',  # TODO Replace it with yours
        'HOST': 'localhost',
        'PORT': '3306',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# Note: calling settings.configure() from inside the settings module is redundant;
# anything it sets is replaced as soon as Django finishes loading this module.
from django.conf import settings
settings.configure(
    ROOT_URLCONF=__name__,
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        # trailing comma keeps this a one-element tuple rather than a plain string
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication'
    )
}
import datetime
JWT_AUTH = {
    # token lifetime (how long an issued token stays valid)
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=60 * 60 * 2)
}
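# With the 2-hour delta above, a token issued at 10:00 stops being accepted at 12:00 and
# the client has to refresh the token or log in again.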
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False  # changed here: timezone support disabled
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
# APPEND_SLASH=False
# STATIC_URL = '/static/'
STATIC_URL = '/front/dist/static/'
STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR,'front/dist/static')
]
# STATIC_ROOT = os.path.join(BASE_DIR,'front/dist/static')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.126.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = '@126.com' # your email
EMAIL_HOST_PASSWORD = '' #TODO Replace it with your smtp authorization code
CONFIRM_DAYS = 7
# The password here should be the authorization code obtained when enabling SMTP
| 25.242152
| 91
| 0.686978
|
5fa65247f308c10d37eba502ff149a0021f0daf6
| 13,182
|
py
|
Python
|
grid/migrations/0001_initial.py
|
santagada/djangopackages
|
18515cce5e220475e2d440455fd5117ba35e0bd9
|
[
"MIT"
] | 2
|
2015-06-09T19:28:34.000Z
|
2015-09-21T15:44:18.000Z
|
grid/migrations/0001_initial.py
|
IlianIliev/opencomparison
|
1bbf9ab7b383012764adc3054dbaafd664b5520b
|
[
"MIT"
] | null | null | null |
grid/migrations/0001_initial.py
|
IlianIliev/opencomparison
|
1bbf9ab7b383012764adc3054dbaafd664b5520b
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
depends_on = (
("package", "0016_auto__del_field_package_pypi_home_page"),
)
def forwards(self, orm):
# Adding model 'Grid'
db.create_table('grid_grid', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('is_locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('grid', ['Grid'])
# Adding model 'GridPackage'
db.create_table('grid_gridpackage', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('grid', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['grid.Grid'])),
('package', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['package.Package'])),
))
db.send_create_signal('grid', ['GridPackage'])
# Adding model 'Feature'
db.create_table('grid_feature', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('grid', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['grid.Grid'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('grid', ['Feature'])
# Adding model 'Element'
db.create_table('grid_element', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('grid_package', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['grid.GridPackage'])),
('feature', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['grid.Feature'])),
('text', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('grid', ['Element'])
def backwards(self, orm):
# Deleting model 'Grid'
db.delete_table('grid_grid')
# Deleting model 'GridPackage'
db.delete_table('grid_gridpackage')
# Deleting model 'Feature'
db.delete_table('grid_feature')
# Deleting model 'Element'
db.delete_table('grid_element')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'grid.element': {
'Meta': {'ordering': "['-id']", 'object_name': 'Element'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['grid.Feature']"}),
'grid_package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['grid.GridPackage']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'grid.feature': {
'Meta': {'object_name': 'Feature'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'grid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['grid.Grid']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'grid.grid': {
'Meta': {'ordering': "['title']", 'object_name': 'Grid'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'packages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['package.Package']", 'through': "orm['grid.GridPackage']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'grid.gridpackage': {
'Meta': {'object_name': 'GridPackage'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'grid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['grid.Grid']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']"})
},
'package.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'show_pypi': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'title_plural': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'blank': 'True'})
},
'package.package': {
'Meta': {'ordering': "['title']", 'object_name': 'Package'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Category']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modifier'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'participants': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pypi_downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pypi_home_page': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'pypi_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'related_packages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_packages_rel_+'", 'blank': 'True', 'to': "orm['package.Package']"}),
'repo_commits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'repo_forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'}),
'repo_watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'usage': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
}
}
complete_apps = ['grid']
| 71.254054
| 183
| 0.578289
|
631cb7537a683041b336211a26306300c1766471
| 3,990
|
py
|
Python
|
bokeh/io/tests/test_webdriver.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 12
|
2020-07-20T14:58:31.000Z
|
2021-09-04T22:15:14.000Z
|
bokeh/io/tests/test_webdriver.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 4
|
2021-03-18T22:30:03.000Z
|
2022-02-12T06:12:28.000Z
|
bokeh/io/tests/test_webdriver.py
|
kevin1kevin1k/bokeh
|
9f34b5b710e2748ec803c12918ec1706098a3477
|
[
"BSD-3-Clause"
] | 3
|
2019-03-27T23:27:05.000Z
|
2020-08-05T19:03:19.000Z
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import selenium.webdriver.phantomjs.webdriver
import six
# Bokeh imports
# Module under test
import bokeh.io.webdriver as biw
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_create_phantomjs_webdriver():
d = biw.create_phantomjs_webdriver()
assert isinstance(d, selenium.webdriver.phantomjs.webdriver.WebDriver)
@pytest.mark.skipif(six.PY2, reason="checking service not a reliable indicator on Py2")
def test_terminate_webdriver():
d = biw.create_phantomjs_webdriver()
assert d.service.process is not None
biw.terminate_webdriver(d)
assert d.service.process is None
_driver_map = {
'phantomjs': selenium.webdriver.phantomjs.webdriver.WebDriver,
}
class Test_webdriver_control(object):
def test_default(self):
# other tests may have interacted with the global biw.webdriver_control,
# so create a new instance only to check default values
wc = biw._WebdriverState()
assert wc.reuse == True
assert wc.kind == "phantomjs"
assert wc.current is None
def test_get_with_reuse(self):
biw.webdriver_control.reuse = True
assert biw.webdriver_control.reuse == True
d1 = biw.webdriver_control.get()
d2 = biw.webdriver_control.get()
assert d1 is d2
biw.webdriver_control.reset()
def test_get_with_reuse_and_reset(self):
biw.webdriver_control.reuse = True
assert biw.webdriver_control.reuse == True
d1 = biw.webdriver_control.get()
biw.webdriver_control.reset()
d2 = biw.webdriver_control.get()
assert d1 is not d2
d3 = biw.webdriver_control.get()
assert d2 is d3
biw.webdriver_control.reset()
def test_get_without_reuse(self):
biw.webdriver_control.reuse = False
assert biw.webdriver_control.reuse == False
d1 = biw.webdriver_control.get()
d2 = biw.webdriver_control.get()
assert d1 is not d2
biw.webdriver_control.reuse = True
biw.webdriver_control.reset()
@pytest.mark.parametrize('kind', ['phantomjs'])
def test_create(self, kind):
biw.webdriver_control.kind = kind
assert biw.webdriver_control.kind == kind
d = biw.webdriver_control.create()
assert isinstance(d, _driver_map[kind])
biw.webdriver_control.reset()
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 36.272727
| 87
| 0.475689
|
79ecf1019d72aca6cc9e348c830ff7b12a783a0a
| 2,142
|
py
|
Python
|
resilient-sdk/tests/unit/test_util/test_jinja_filters.py
|
COLDTURNIP/resilient-python-api
|
14423f1dec32af67f7203c8d4d36d0a9e2651802
|
[
"MIT"
] | null | null | null |
resilient-sdk/tests/unit/test_util/test_jinja_filters.py
|
COLDTURNIP/resilient-python-api
|
14423f1dec32af67f7203c8d4d36d0a9e2651802
|
[
"MIT"
] | null | null | null |
resilient-sdk/tests/unit/test_util/test_jinja_filters.py
|
COLDTURNIP/resilient-python-api
|
14423f1dec32af67f7203c8d4d36d0a9e2651802
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2020. All Rights Reserved.
from jinja2 import Environment
from resilient_sdk.util import jinja2_filters
def mock_setup_jinja_env():
return Environment()
def test_filter_base64():
mock_text = u"convert ᠭ ᠮ ᠯ me to base64"
filtered_text = jinja2_filters._filter_base64(mock_text)
assert filtered_text == "ImNvbnZlcnQgXHUxODJkIFx1MTgyZSBcdTE4MmYgbWUgdG8gYmFzZTY0Ig==\n"
def test_filter_camel():
mock_text = "Please#ReTurn_++Pla1n Camel Case"
filtered_text = jinja2_filters._filter_camel(mock_text)
assert filtered_text == u"PleaseReturnPla1NCamelCase"
def test_dot_py():
mock_text = "validating setup.py"
filtered_text = jinja2_filters._dot_py(mock_text)
assert filtered_text == u"validating `setup.py`"
def test_scrub_ansi():
mock_text = "\033[92msome green text\033[0m"
filtered_text = jinja2_filters._scrub_ansi(mock_text)
assert filtered_text == u"some green text"
def test_convert_to_code():
mock_text = "'''pip install -U 'resilient-circuits''''"
filtered_text = jinja2_filters._convert_to_code(mock_text)
assert "```shell\n$ pip install -U \"resilient-circuits\"\n```" in filtered_text
def test_defaults_to_code():
mock_text = "<<example url>>"
filtered_text = jinja2_filters._defaults_to_code(mock_text)
assert filtered_text == "`<<example url>>`"
def test_render_diff():
mock_text = "\n\t\t--- from\n\t\t+++ to\n\t\t@@ -1 +1 @@\n\t\t-no\n\t\t+yes\n\t\t"
filtered_text = jinja2_filters._render_diff(mock_text)
assert "```diff\n--- from\n+++ to\n@@ -1 +1 @@\n-no\n+yes\n```" in filtered_text
def test_readable_time_from_timestamp():
mock_timestamp = "20211118104506"
converted_time = jinja2_filters._readable_time_from_timestamp(mock_timestamp)
assert "2021/11/18 10:45:06" == converted_time
def test_add_filters_to_jinja_env():
jinja_env = mock_setup_jinja_env()
jinja2_filters.add_filters_to_jinja_env(jinja_env)
assert all(elem in jinja_env.filters for elem in jinja2_filters.JINJA_FILTERS.keys()) is True
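# --- Illustrative sketch, not part of the original test module ---
# Shows the intended end-to-end flow once add_filters_to_jinja_env() has run:
# render a template that pipes a value through one of the registered filters.
# The registered filter name "camel" is an assumption about the keys of
# JINJA_FILTERS; substitute any real key from that mapping if it differs.
def example_camel_filter_in_template():
    jinja_env = mock_setup_jinja_env()
    jinja2_filters.add_filters_to_jinja_env(jinja_env)
    template = jinja_env.from_string("{{ 'Please#ReTurn_++Pla1n Camel Case' | camel }}")
    return template.render()  # expected to match test_filter_camel above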
| 25.807229
| 97
| 0.72549
|
b6b95893acccc0e47035f4439619793ca11e8d07
| 29,612
|
py
|
Python
|
tests/dag_processing/test_processor.py
|
higrys/airflow
|
6b133681e2400cd610ef0b37fb2d30c1e3340c59
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-07-30T16:59:35.000Z
|
2021-08-03T13:51:45.000Z
|
tests/dag_processing/test_processor.py
|
higrys/airflow
|
6b133681e2400cd610ef0b37fb2d30c1e3340c59
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/dag_processing/test_processor.py
|
higrys/airflow
|
6b133681e2400cd610ef0b37fb2d30c1e3340c59
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import os
import unittest
from datetime import timedelta
from tempfile import NamedTemporaryFile
from unittest import mock
from unittest.mock import MagicMock, patch
import pytest
from parameterized import parameterized
from airflow import settings
from airflow.configuration import conf
from airflow.dag_processing.processor import DagFileProcessor
from airflow.jobs.scheduler_job import SchedulerJob
from airflow.models import DAG, DagBag, DagModel, SlaMiss, TaskInstance
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.operators.bash import BashOperator
from airflow.operators.dummy import DummyOperator
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.utils import timezone
from airflow.utils.callback_requests import TaskCallbackRequest
from airflow.utils.dates import days_ago
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from tests.test_utils.config import conf_vars, env_vars
from tests.test_utils.db import (
clear_db_dags,
clear_db_import_errors,
clear_db_jobs,
clear_db_pools,
clear_db_runs,
clear_db_serialized_dags,
clear_db_sla_miss,
)
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
@pytest.fixture(scope="class")
def disable_load_example():
with conf_vars({('core', 'load_examples'): 'false'}):
with env_vars({('core', 'load_examples'): 'false'}):
yield
@pytest.mark.usefixtures("disable_load_example")
class TestDagFileProcessor(unittest.TestCase):
@staticmethod
def clean_db():
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_import_errors()
clear_db_jobs()
clear_db_serialized_dags()
def setUp(self):
self.clean_db()
# Speed up some tests by not running the tasks, just look at what we
# enqueue!
self.null_exec = MockExecutor()
self.scheduler_job = None
def tearDown(self) -> None:
if self.scheduler_job and self.scheduler_job.processor_agent:
self.scheduler_job.processor_agent.end()
self.scheduler_job = None
self.clean_db()
def create_test_dag(self, start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(hours=1), **kwargs):
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=start_date,
# Make sure it only creates a single DAG Run
end_date=end_date,
)
dag.clear()
dag.is_subdag = False
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id, is_paused=False)
session.merge(orm_dag)
session.commit()
return dag
@classmethod
def setUpClass(cls):
# Ensure the DAGs we are looking at from the DB are up-to-date
non_serialized_dagbag = DagBag(read_dags_from_db=False, include_examples=False)
non_serialized_dagbag.sync_to_db()
cls.dagbag = DagBag(read_dags_from_db=True)
@staticmethod
def assert_scheduled_ti_count(session, count):
assert count == session.query(TaskInstance).filter_by(state=State.SCHEDULED).count()
def test_dag_file_processor_sla_miss_callback(self):
"""
Test that the dag file processor calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(
dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date, 'sla': datetime.timedelta()},
)
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_dag_file_processor_sla_miss_callback_invalid_sla(self):
"""
Test that the dag file processor does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = days_ago(1)
dag = DAG(
dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date, 'sla': None},
)
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_dag_file_processor_sla_miss_callback_sent_notification(self):
"""
Test that the dag file processor does not call the sla_miss_callback when a
notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(
dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date, 'sla': datetime.timedelta(days=1)},
)
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow')
# Create a TaskInstance for two days ago
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(
SlaMiss(
task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True,
)
)
# Now call manage_slas and see if the sla_miss callback gets called
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_dag_file_processor_sla_miss_callback_exception(self):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(
dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date},
)
task = DummyOperator(task_id='dummy', dag=dag, owner='airflow', sla=datetime.timedelta(hours=1))
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log.exception.assert_called_once_with(
'Could not call sla_miss_callback for DAG %s', 'test_sla_miss'
)
@mock.patch('airflow.dag_processing.processor.send_email')
def test_dag_file_processor_only_collect_emails_from_sla_missed_tasks(self, mock_send_email):
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(
dag_id='test_sla_miss',
default_args={'start_date': test_start_date, 'sla': datetime.timedelta(days=1)},
)
email1 = 'test1@test.com'
task = DummyOperator(
task_id='sla_missed', dag=dag, owner='airflow', email=email1, sla=datetime.timedelta(hours=1)
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
email2 = 'test2@test.com'
DummyOperator(task_id='sla_not_missed', dag=dag, owner='airflow', email=email2)
session.merge(SlaMiss(task_id='sla_missed', dag_id='test_sla_miss', execution_date=test_start_date))
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.manage_slas(dag=dag, session=session)
assert len(mock_send_email.call_args_list) == 1
send_email_to = mock_send_email.call_args_list[0][0][0]
assert email1 in send_email_to
assert email2 not in send_email_to
@mock.patch('airflow.dag_processing.processor.Stats.incr')
@mock.patch("airflow.utils.email.send_email")
def test_dag_file_processor_sla_miss_email_exception(self, mock_send_email, mock_stats_incr):
"""
Test that the dag file processor gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(
dag_id='test_sla_miss',
default_args={'start_date': test_start_date, 'sla': datetime.timedelta(days=1)},
)
task = DummyOperator(
task_id='dummy', dag=dag, owner='airflow', email='test@test.com', sla=datetime.timedelta(hours=1)
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy', dag_id='test_sla_miss', execution_date=test_start_date))
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
mock_log.exception.assert_called_once_with(
'Could not send SLA Miss email notification for DAG %s', 'test_sla_miss'
)
mock_stats_incr.assert_called_once_with('sla_email_notification_failure')
def test_dag_file_processor_sla_miss_deleted_task(self):
"""
Test that the dag file processor will not crash when trying to send
sla miss notification for a deleted task
"""
session = settings.Session()
test_start_date = days_ago(2)
dag = DAG(
dag_id='test_sla_miss',
default_args={'start_date': test_start_date, 'sla': datetime.timedelta(days=1)},
)
task = DummyOperator(
task_id='dummy', dag=dag, owner='airflow', email='test@test.com', sla=datetime.timedelta(hours=1)
)
session.merge(TaskInstance(task=task, execution_date=test_start_date, state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(
SlaMiss(task_id='dummy_deleted', dag_id='test_sla_miss', execution_date=test_start_date)
)
mock_log = mock.MagicMock()
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock_log)
dag_file_processor.manage_slas(dag=dag, session=session)
@parameterized.expand(
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
]
)
def test_dag_file_processor_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(dag_id='test_scheduler_process_execute_task', start_date=DEFAULT_DATE)
BashOperator(task_id='dummy', dag=dag, owner='airflow', bash_command='echo hi')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.processor_agent = mock.MagicMock()
self.scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.scheduler_job._schedule_dag_run(dr, session)
self.assert_scheduled_ti_count(session, 1)
session.refresh(ti)
assert ti.state == State.SCHEDULED
@parameterized.expand(
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
]
)
def test_dag_file_processor_process_task_instances_with_task_concurrency(
self,
state,
start_date,
end_date,
):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(dag_id='test_scheduler_process_execute_task_with_task_concurrency', start_date=DEFAULT_DATE)
BashOperator(task_id='dummy', task_concurrency=2, dag=dag, owner='airflow', bash_command='echo Hi')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.processor_agent = mock.MagicMock()
self.scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
assert dr is not None
with create_session() as session:
ti = dr.get_task_instances(session=session)[0]
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.scheduler_job._schedule_dag_run(dr, session)
self.assert_scheduled_ti_count(session, 1)
session.refresh(ti)
assert ti.state == State.SCHEDULED
@parameterized.expand(
[
[State.NONE, None, None],
[
State.UP_FOR_RETRY,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
[
State.UP_FOR_RESCHEDULE,
timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15),
],
]
)
def test_dag_file_processor_process_task_instances_depends_on_past(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
mock_list.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task_depends_on_past',
start_date=DEFAULT_DATE,
default_args={
'depends_on_past': True,
},
)
BashOperator(task_id='dummy1', dag=dag, owner='airflow', bash_command='echo hi')
BashOperator(task_id='dummy2', dag=dag, owner='airflow', bash_command='echo hi')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.processor_agent = mock.MagicMock()
self.scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
dag.clear()
dr = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE,
state=State.RUNNING,
)
assert dr is not None
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
self.scheduler_job._schedule_dag_run(dr, session)
self.assert_scheduled_ti_count(session, 2)
session.refresh(tis[0])
session.refresh(tis[1])
assert tis[0].state == State.SCHEDULED
assert tis[1].state == State.SCHEDULED
def test_scheduler_job_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(dag_id='test_scheduler_add_new_task', start_date=DEFAULT_DATE)
BashOperator(task_id='dummy', dag=dag, owner='airflow', bash_command='echo test')
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
# Since we don't want to store the code for the DAG defined in this file
with mock.patch.object(settings, "STORE_DAG_CODE", False):
self.scheduler_job.dagbag.sync_to_db()
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
assert orm_dag is not None
if self.scheduler_job.processor_agent:
self.scheduler_job.processor_agent.end()
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.processor_agent = mock.MagicMock()
dag = self.scheduler_job.dagbag.get_dag('test_scheduler_add_new_task', session=session)
self.scheduler_job._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
tis = dr.get_task_instances()
assert len(tis) == 1
BashOperator(task_id='dummy2', dag=dag, owner='airflow', bash_command='echo test')
SerializedDagModel.write_dag(dag=dag)
self.scheduler_job._schedule_dag_run(dr, session)
self.assert_scheduled_ti_count(session, 2)
session.flush()
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
tis = dr.get_task_instances()
assert len(tis) == 2
def test_runs_respected_after_clear(self):
"""
        Test that max_active_runs is respected after dag.clear
"""
dag = DAG(dag_id='test_scheduler_max_active_runs_respected_after_clear', start_date=DEFAULT_DATE)
dag.max_active_runs = 1
BashOperator(task_id='dummy', dag=dag, owner='airflow', bash_command='echo Hi')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
dag = SerializedDAG.from_dict(SerializedDAG.to_dict(dag))
# Write Dag to DB
dagbag = DagBag(dag_folder="/dev/null", include_examples=False, read_dags_from_db=False)
dagbag.bag_dag(dag, root_dag=dag)
dagbag.sync_to_db()
dag = DagBag(read_dags_from_db=True, include_examples=False).get_dag(dag.dag_id)
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.processor_agent = mock.MagicMock()
self.scheduler_job.dagbag.bag_dag(dag, root_dag=dag)
date = DEFAULT_DATE
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.QUEUED,
)
date = dag.following_schedule(date)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.QUEUED,
)
date = dag.following_schedule(date)
dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=date,
state=State.QUEUED,
)
dag.clear()
assert len(DagRun.find(dag_id=dag.dag_id, state=State.QUEUED, session=session)) == 3
session = settings.Session()
self.scheduler_job._start_queued_dagruns(session)
session.commit()
# Assert that only 1 dagrun is active
assert len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)) == 1
# Assert that the other two are queued
assert len(DagRun.find(dag_id=dag.dag_id, state=State.QUEUED, session=session)) == 2
@patch.object(TaskInstance, 'handle_failure_with_callback')
def test_execute_on_failure_callbacks(self, mock_ti_handle_failure):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True, read_dags_from_db=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
TaskCallbackRequest(
full_filepath="A", simple_task_instance=SimpleTaskInstance(ti), msg="Message"
)
]
dag_file_processor.execute_callbacks(dagbag, requests)
mock_ti_handle_failure.assert_called_once_with(
error="Message",
test_mode=conf.getboolean('core', 'unit_test_mode'),
)
def test_failure_callbacks_should_not_drop_hostname(self):
dagbag = DagBag(dag_folder="/dev/null", include_examples=True, read_dags_from_db=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
dag_file_processor.UNIT_TEST_MODE = False
with create_session() as session:
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
ti.hostname = "test_hostname"
session.add(ti)
with create_session() as session:
requests = [
TaskCallbackRequest(
full_filepath="A", simple_task_instance=SimpleTaskInstance(ti), msg="Message"
)
]
dag_file_processor.execute_callbacks(dagbag, requests)
tis = session.query(TaskInstance)
assert tis[0].hostname == "test_hostname"
def test_process_file_should_failure_callback(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_on_failure_callback.py'
)
dagbag = DagBag(dag_folder=dag_file, include_examples=False)
dag_file_processor = DagFileProcessor(dag_ids=[], log=mock.MagicMock())
with create_session() as session, NamedTemporaryFile(delete=False) as callback_file:
session.query(TaskInstance).delete()
dag = dagbag.get_dag('test_om_failure_callback_dag')
task = dag.get_task(task_id='test_om_failure_callback_task')
ti = TaskInstance(task, DEFAULT_DATE, State.RUNNING)
session.add(ti)
session.commit()
requests = [
TaskCallbackRequest(
full_filepath=dag.full_filepath,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message",
)
]
callback_file.close()
with mock.patch.dict("os.environ", {"AIRFLOW_CALLBACK_FILE": callback_file.name}):
dag_file_processor.process_file(dag_file, requests)
with open(callback_file.name) as callback_file2:
content = callback_file2.read()
assert "Callback fired" == content
os.remove(callback_file.name)
def test_should_mark_dummy_task_as_success(self):
dag_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../dags/test_only_dummy_tasks.py'
)
# Write DAGs to dag and serialized_dag table
dagbag = DagBag(dag_folder=dag_file, include_examples=False, read_dags_from_db=False)
dagbag.sync_to_db()
self.scheduler_job_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job_job.processor_agent = mock.MagicMock()
dag = self.scheduler_job_job.dagbag.get_dag("test_only_dummy_tasks")
# Create DagRun
session = settings.Session()
orm_dag = session.query(DagModel).get(dag.dag_id)
self.scheduler_job_job._create_dag_runs([orm_dag], session)
drs = DagRun.find(dag_id=dag.dag_id, session=session)
assert len(drs) == 1
dr = drs[0]
# Schedule TaskInstances
self.scheduler_job_job._schedule_dag_run(dr, session)
with create_session() as session:
tis = session.query(TaskInstance).all()
dags = self.scheduler_job_job.dagbag.dags.values()
assert ['test_only_dummy_tasks'] == [dag.dag_id for dag in dags]
assert 5 == len(tis)
assert {
('test_task_a', 'success'),
('test_task_b', None),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
} == {(ti.task_id, ti.state) for ti in tis}
for state, start_date, end_date, duration in [
(ti.state, ti.start_date, ti.end_date, ti.duration) for ti in tis
]:
if state == 'success':
assert start_date is not None
assert end_date is not None
assert 0.0 == duration
else:
assert start_date is None
assert end_date is None
assert duration is None
self.scheduler_job_job._schedule_dag_run(dr, session)
with create_session() as session:
tis = session.query(TaskInstance).all()
assert 5 == len(tis)
assert {
('test_task_a', 'success'),
('test_task_b', 'success'),
('test_task_c', 'success'),
('test_task_on_execute', 'scheduled'),
('test_task_on_success', 'scheduled'),
} == {(ti.task_id, ti.state) for ti in tis}
for state, start_date, end_date, duration in [
(ti.state, ti.start_date, ti.end_date, ti.duration) for ti in tis
]:
if state == 'success':
assert start_date is not None
assert end_date is not None
assert 0.0 == duration
else:
assert start_date is None
assert end_date is None
assert duration is None
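# --- Illustrative note; the command below is an assumption, not taken from this file ---
# The class above is unittest-style but is collected by pytest, so a single
# case can be run in isolation while iterating on it, e.g.:
#
#   pytest tests/dag_processing/test_processor.py::TestDagFileProcessor::test_dag_file_processor_sla_miss_callback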
| 38.258398
| 110
| 0.644029
|
942c9abf967f5b0c04fc8059bf57049606587335
| 694
|
py
|
Python
|
base/migrations/0002_user_bio_user_name_alter_user_email.py
|
FDB09/discord-clone
|
f44eaef6332aefee60656ce956858c5f49e895b4
|
[
"MIT"
] | null | null | null |
base/migrations/0002_user_bio_user_name_alter_user_email.py
|
FDB09/discord-clone
|
f44eaef6332aefee60656ce956858c5f49e895b4
|
[
"MIT"
] | null | null | null |
base/migrations/0002_user_bio_user_name_alter_user_email.py
|
FDB09/discord-clone
|
f44eaef6332aefee60656ce956858c5f49e895b4
|
[
"MIT"
] | null | null | null |
# Generated by Django 4.0 on 2022-01-04 05:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='user',
name='name',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='user',
name='email',
field=models.EmailField(max_length=254, null=True, unique=True),
),
]
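# --- Illustrative sketch (assumption, not taken from the repository) ---
# For this migration to be generated, the custom user model in base/models.py
# would have to declare fields along these lines; ordering and extra options
# on the real model may differ.
#
# class User(AbstractUser):
#     name = models.CharField(max_length=200, null=True)
#     email = models.EmailField(max_length=254, null=True, unique=True)
#     bio = models.TextField(null=True)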
| 23.931034
| 76
| 0.548991
|
cb992b1ef07610741428b47473b263f89f592bde
| 6,012
|
py
|
Python
|
connect.py
|
Ohrwurm2333/BiliLiveGiftThx
|
de602d6ab6f3c428c09927154ed2377445e7bfc7
|
[
"MIT"
] | null | null | null |
connect.py
|
Ohrwurm2333/BiliLiveGiftThx
|
de602d6ab6f3c428c09927154ed2377445e7bfc7
|
[
"MIT"
] | null | null | null |
connect.py
|
Ohrwurm2333/BiliLiveGiftThx
|
de602d6ab6f3c428c09927154ed2377445e7bfc7
|
[
"MIT"
] | null | null | null |
import asyncio
import utils
import bilibiliCilent
import printer
from bilibili import bilibili
from configloader import ConfigLoader
import random
from raven import Client
async def check_room_state(roomid):
json_rsp = await bilibili.req_room_init(roomid)
return json_rsp['data']['live_status']
async def get_one(areaid):
    # area ids: 1 = entertainment, 2 = games, 3 = mobile games, 4 = drawing/art
if areaid == 1:
roomid = 23058
state = await check_room_state(roomid)
if state == 1:
            printer.info([f'Danmaku monitor #{areaid} picked room ({roomid})'], True)
return roomid
while True:
json_rsp = await bilibili.req_realroomid(areaid)
data = json_rsp['data']
roomid = random.choice(data)['roomid']
state = await check_room_state(roomid)
if state == 1:
            printer.info([f'Danmaku monitor #{areaid} picked room ({roomid})'], True)
return roomid
class connect():
__slots__ = ('danmuji')
instance = None
def __new__(cls, *args, **kw):
if not cls.instance:
cls.instance = super(connect, cls).__new__(cls, *args, **kw)
cls.instance.danmuji = None
return cls.instance
async def run(self):
self.danmuji = bilibiliCilent.DanmuPrinter()
while True:
            print('# Starting the live-room monitoring danmaku client')
time_start = int(utils.CurrentTime())
connect_results = await self.danmuji.connectServer()
# print(connect_results)
if not connect_results:
continue
task_main = asyncio.ensure_future(self.danmuji.ReceiveMessageLoop())
task_heartbeat = asyncio.ensure_future(self.danmuji.HeartbeatLoop())
finished, pending = await asyncio.wait([task_main, task_heartbeat], return_when=asyncio.FIRST_COMPLETED)
            print('Main danmaku client errored or disconnected; handling remaining messages')
time_end = int(utils.CurrentTime())
if not task_heartbeat.done():
task_heartbeat.cancel()
task_terminate = asyncio.ensure_future(self.danmuji.close_connection())
await asyncio.wait(pending)
await asyncio.wait([task_terminate])
            printer.info(['Main danmaku client exited; remaining tasks finished'], True)
            if time_end - time_start < 5:
                print('# Network is unstable; automatically retrying in 5 seconds to avoid frequent pointless attempts')
await asyncio.sleep(5)
@staticmethod
async def reconnect(roomid):
ConfigLoader().dic_user['other_control']['default_monitor_roomid'] = roomid
        print('roomid switched')
if connect.instance.danmuji is not None:
connect.instance.danmuji.roomid = roomid
await connect.instance.danmuji.close_connection()
class RaffleConnect():
def __init__(self, areaid):
self.danmuji = None
self.roomid = 0
self.areaid = areaid
async def run(self):
self.danmuji = bilibiliCilent.DanmuRaffleHandler(self.roomid, self.areaid)
while True:
self.danmuji.roomid = await get_one(self.areaid)
            printer.info(['# Starting the raffle-monitoring danmaku client'], True)
time_start = int(utils.CurrentTime())
connect_results = await self.danmuji.connectServer()
# print(connect_results)
if not connect_results:
continue
task_main = asyncio.ensure_future(self.danmuji.ReceiveMessageLoop())
task_heartbeat = asyncio.ensure_future(self.danmuji.HeartbeatLoop())
task_checkarea = asyncio.ensure_future(self.danmuji.CheckArea())
finished, pending = await asyncio.wait([task_main, task_heartbeat, task_checkarea], return_when=asyncio.FIRST_COMPLETED)
            printer.info([f'Danmaku client #{self.areaid} errored or disconnected; handling remaining messages'], True)
time_end = int(utils.CurrentTime())
if not task_heartbeat.done():
task_heartbeat.cancel()
if not task_checkarea.done():
task_checkarea.cancel()
task_terminate = asyncio.ensure_future(self.danmuji.close_connection())
await asyncio.wait(pending)
await asyncio.wait([task_terminate])
            printer.info([f'Danmaku client #{self.areaid} exited; remaining tasks finished'], True)
            if time_end - time_start < 5:
                print('# Network is unstable; automatically retrying in 5 seconds to avoid frequent pointless attempts')
dsn = ConfigLoader().dic_user['other_control']['sentry_dsn']
client = Client(dsn)
try:
                    raise Exception('Network unstable, retrying')
except:
client.captureException()
await asyncio.sleep(5)
class YjConnection():
def __init__(self):
self.danmuji = None
self.roomid = 0
self.areaid = -1
async def run(self):
self.roomid = ConfigLoader().dic_user['other_control']['raffle_minitor_roomid']
if not self.roomid:
            print('# raffle monitor roomid is not configured; YjConnection monitor exits')
return
self.danmuji = bilibiliCilent.YjMonitorHandler(self.roomid, self.areaid)
while True:
            print('# Starting the live-room monitoring danmaku client')
time_start = int(utils.CurrentTime())
connect_results = await self.danmuji.connectServer()
# print(connect_results)
if not connect_results:
continue
task_main = asyncio.ensure_future(self.danmuji.ReceiveMessageLoop())
task_heartbeat = asyncio.ensure_future(self.danmuji.HeartbeatLoop())
finished, pending = await asyncio.wait([task_main, task_heartbeat], return_when=asyncio.FIRST_COMPLETED)
            print('Main danmaku client errored or disconnected; handling remaining messages')
time_end = int(utils.CurrentTime())
if not task_heartbeat.done():
task_heartbeat.cancel()
task_terminate = asyncio.ensure_future(self.danmuji.close_connection())
await asyncio.wait(pending)
await asyncio.wait([task_terminate])
            printer.info(['Main danmaku client exited; remaining tasks finished'], True)
            if time_end - time_start < 5:
                print('# Network is unstable; automatically retrying in 5 seconds to avoid frequent pointless attempts')
await asyncio.sleep(5)
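# --- Illustrative sketch (generic helper, not part of the original module) ---
# The run() loops above share one supervision pattern: start the receive loop
# and the heartbeat together, wait for whichever finishes first, cancel the
# survivors, then close the connection. A minimal standalone version of that
# pattern looks like this:
async def _supervise_example(receive_coro, heartbeat_coro, close_coro):
    task_main = asyncio.ensure_future(receive_coro)
    task_heartbeat = asyncio.ensure_future(heartbeat_coro)
    finished, pending = await asyncio.wait(
        [task_main, task_heartbeat], return_when=asyncio.FIRST_COMPLETED)
    for task in pending:
        task.cancel()
    # Let cancelled tasks unwind before tearing down the connection.
    await asyncio.gather(*pending, return_exceptions=True)
    await close_coro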
| 39.81457
| 132
| 0.61477
|
68b6deb8806720cb3b86c31f374558d46c06c657
| 11,329
|
py
|
Python
|
roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
|
ramkrsna/openshift-ansible
|
fc96d8d22f6c277b599e6e2fa4e9cc06814a9460
|
[
"Apache-2.0"
] | null | null | null |
roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
|
ramkrsna/openshift-ansible
|
fc96d8d22f6c277b599e6e2fa4e9cc06814a9460
|
[
"Apache-2.0"
] | null | null | null |
roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
|
ramkrsna/openshift-ansible
|
fc96d8d22f6c277b599e6e2fa4e9cc06814a9460
|
[
"Apache-2.0"
] | 1
|
2019-10-28T15:03:29.000Z
|
2019-10-28T15:03:29.000Z
|
import copy
import os
import sys
from ansible.errors import AnsibleError
from nose.tools import raises, assert_equal
sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")] + sys.path
from openshift_master_facts_default_predicates import LookupModule # noqa: E402
# Predicates ordered according to OpenShift Origin source:
# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
DEFAULT_PREDICATES_1_1 = [
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
{'name': 'NoDiskConflict'},
{'name': 'MatchNodeSelector'},
]
DEFAULT_PREDICATES_1_2 = [
{'name': 'PodFitsHostPorts'},
{'name': 'PodFitsResources'},
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
{'name': 'MatchNodeSelector'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'}
]
DEFAULT_PREDICATES_1_3 = [
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'},
{'name': 'GeneralPredicates'},
{'name': 'PodToleratesNodeTaints'},
{'name': 'CheckNodeMemoryPressure'}
]
DEFAULT_PREDICATES_1_4 = [
{'name': 'NoDiskConflict'},
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'},
{'name': 'GeneralPredicates'},
{'name': 'PodToleratesNodeTaints'},
{'name': 'CheckNodeMemoryPressure'},
{'name': 'CheckNodeDiskPressure'},
{'name': 'MatchInterPodAffinity'}
]
DEFAULT_PREDICATES_1_5 = [
{'name': 'NoVolumeZoneConflict'},
{'name': 'MaxEBSVolumeCount'},
{'name': 'MaxGCEPDVolumeCount'},
{'name': 'MatchInterPodAffinity'},
{'name': 'NoDiskConflict'},
{'name': 'GeneralPredicates'},
{'name': 'PodToleratesNodeTaints'},
{'name': 'CheckNodeMemoryPressure'},
{'name': 'CheckNodeDiskPressure'},
]
REGION_PREDICATE = {
'name': 'Region',
'argument': {
'serviceAffinity': {
'labels': ['region']
}
}
}
TEST_VARS = [
('1.1', 'origin', DEFAULT_PREDICATES_1_1),
('3.1', 'openshift-enterprise', DEFAULT_PREDICATES_1_1),
('1.2', 'origin', DEFAULT_PREDICATES_1_2),
('3.2', 'openshift-enterprise', DEFAULT_PREDICATES_1_2),
('1.3', 'origin', DEFAULT_PREDICATES_1_3),
('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
('1.4', 'origin', DEFAULT_PREDICATES_1_4),
('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
('1.5', 'origin', DEFAULT_PREDICATES_1_5),
('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
('1.6', 'origin', DEFAULT_PREDICATES_1_5),
('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
]
class TestOpenShiftMasterFactsDefaultPredicates(object):
def setUp(self):
self.lookup = LookupModule()
self.default_facts = {
'openshift': {
'common': {}
}
}
@raises(AnsibleError)
def test_missing_short_version_and_missing_openshift_release(self):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['deployment_type'] = 'origin'
self.lookup.run(None, variables=facts)
def check_defaults_short_version(self, short_version, deployment_type, default_predicates,
regions_enabled):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = short_version
facts['openshift']['common']['deployment_type'] = deployment_type
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def check_defaults_short_version_kwarg(self, short_version, deployment_type, default_predicates,
regions_enabled):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['deployment_type'] = deployment_type
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled,
short_version=short_version)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def check_defaults_deployment_type_kwarg(self, short_version, deployment_type,
default_predicates, regions_enabled):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = short_version
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled,
deployment_type=deployment_type)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def check_defaults_only_kwargs(self, short_version, deployment_type,
default_predicates, regions_enabled):
facts = copy.deepcopy(self.default_facts)
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled,
short_version=short_version,
deployment_type=deployment_type)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def check_defaults_release(self, release, deployment_type, default_predicates,
regions_enabled):
facts = copy.deepcopy(self.default_facts)
facts['openshift_release'] = release
facts['openshift']['common']['deployment_type'] = deployment_type
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def check_defaults_version(self, version, deployment_type, default_predicates,
regions_enabled):
facts = copy.deepcopy(self.default_facts)
facts['openshift_version'] = version
facts['openshift']['common']['deployment_type'] = deployment_type
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def check_defaults_override_vars(self, release, deployment_type,
default_predicates, regions_enabled,
extra_facts=None):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = release
facts['openshift']['common']['deployment_type'] = deployment_type
if extra_facts is not None:
for fact in extra_facts:
facts[fact] = extra_facts[fact]
results = self.lookup.run(None, variables=facts,
regions_enabled=regions_enabled,
return_set_vars=False)
if regions_enabled:
assert_equal(results, default_predicates + [REGION_PREDICATE])
else:
assert_equal(results, default_predicates)
def test_openshift_version(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
release = release + '.1'
yield self.check_defaults_version, release, deployment_type, default_predicates, regions_enabled
def test_v_release_defaults(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
yield self.check_defaults_release, 'v' + release, deployment_type, default_predicates, regions_enabled
def test_release_defaults(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
yield self.check_defaults_release, release, deployment_type, default_predicates, regions_enabled
def test_short_version_defaults(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
yield self.check_defaults_short_version, release, deployment_type, default_predicates, regions_enabled
def test_short_version_kwarg(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
yield self.check_defaults_short_version_kwarg, release, deployment_type, default_predicates, regions_enabled
def test_only_kwargs(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
yield self.check_defaults_only_kwargs, release, deployment_type, default_predicates, regions_enabled
def test_deployment_type_kwarg(self):
for regions_enabled in (True, False):
for release, deployment_type, default_predicates in TEST_VARS:
yield self.check_defaults_deployment_type_kwarg, release, deployment_type, default_predicates, regions_enabled
def test_trunc_openshift_release(self):
for release, deployment_type, default_predicates in TEST_VARS:
release = release + '.1'
yield self.check_defaults_release, release, deployment_type, default_predicates, False
@raises(AnsibleError)
def test_unknown_deployment_types(self):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = '1.1'
facts['openshift']['common']['deployment_type'] = 'bogus'
self.lookup.run(None, variables=facts)
@raises(AnsibleError)
def test_unknown_origin_version(self):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = '0.1'
facts['openshift']['common']['deployment_type'] = 'origin'
self.lookup.run(None, variables=facts)
@raises(AnsibleError)
def test_unknown_ocp_version(self):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = '0.1'
facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
self.lookup.run(None, variables=facts)
@raises(AnsibleError)
def test_missing_deployment_type(self):
facts = copy.deepcopy(self.default_facts)
facts['openshift']['common']['short_version'] = '10.10'
self.lookup.run(None, variables=facts)
@raises(AnsibleError)
def testMissingOpenShiftFacts(self):
facts = {}
self.lookup.run(None, variables=facts)
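# --- Illustrative note (derived from the fixtures above, no new data) ---
# With regions_enabled=True the lookup is expected to return the
# version-specific defaults with the region predicate appended, e.g. for
# origin 1.1:
#
#   DEFAULT_PREDICATES_1_1 + [REGION_PREDICATE] == [
#       {'name': 'PodFitsHostPorts'},
#       {'name': 'PodFitsResources'},
#       {'name': 'NoDiskConflict'},
#       {'name': 'MatchNodeSelector'},
#       REGION_PREDICATE,
#   ]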
| 42.430712
| 126
| 0.648071
|
aadbaf15703afbb455473d8545ff892baac3698e
| 3,246
|
py
|
Python
|
app/app/settings.py
|
hizikNewton/cocoa-disease-prediction
|
b4073fcc653c074383e3e6523617bb69172950e2
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
hizikNewton/cocoa-disease-prediction
|
b4073fcc653c074383e3e6523617bb69172950e2
|
[
"MIT"
] | 6
|
2020-06-06T00:35:44.000Z
|
2022-02-10T11:08:41.000Z
|
app/app/settings.py
|
hizikNewton/cocoa-disease-prediction
|
b4073fcc653c074383e3e6523617bb69172950e2
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_u5wwfked2+x9mf2=-6z2l8sn_xdwt5%$@nvpv4t3rl#vxl!v*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST':os.environ.get('DB_HOST'),
'NAME':os.environ.get('DB_NAME'),
'USER':os.environ.get('DB_USER'),
'PASSWORD':os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
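# --- Illustrative note (placeholder values, not part of the original settings) ---
# The DATABASES block above reads its connection details from the environment,
# so the process (or docker-compose service) running Django is expected to
# export something like:
#
#   DB_HOST=db
#   DB_NAME=app
#   DB_USER=postgres
#   DB_PASS=supersecretpassword
#
# Only the variable names come from this file; the values are placeholders.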
| 25.359375
| 91
| 0.689464
|
742b3127e2d30985d7a328bc52c3ff274e83fb8d
| 223
|
py
|
Python
|
tests/FlaskTest/configs/notification_config_disabled.py
|
Laneglos/error-tracker
|
b07366e94199fc5157ddc5623fa12c8c0d07c483
|
[
"BSD-3-Clause"
] | 16
|
2019-12-17T10:57:43.000Z
|
2022-01-30T13:03:53.000Z
|
tests/FlaskTest/configs/notification_config_disabled.py
|
Laneglos/error-tracker
|
b07366e94199fc5157ddc5623fa12c8c0d07c483
|
[
"BSD-3-Clause"
] | 15
|
2020-01-08T12:08:32.000Z
|
2022-01-28T13:16:48.000Z
|
tests/FlaskTest/configs/notification_config_disabled.py
|
Laneglos/error-tracker
|
b07366e94199fc5157ddc5623fa12c8c0d07c483
|
[
"BSD-3-Clause"
] | 8
|
2020-01-08T14:10:14.000Z
|
2021-01-31T22:26:07.000Z
|
APP_ERROR_SEND_NOTIFICATION = True
APP_ERROR_RECIPIENT_EMAIL = None
APP_ERROR_SUBJECT_PREFIX = ""
APP_ERROR_MASK_WITH = "**************"
APP_ERROR_MASKED_KEY_HAS = ("password", "secret")
APP_ERROR_URL_PREFIX = "/dev/error"
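# --- Illustrative sketch (hypothetical helper, not the library's own API) ---
# The two masking settings above exist to hide sensitive values before an
# error report is stored or emailed. A minimal sketch of that behaviour,
# using only the constants defined in this config:
def _mask_example(data):
    masked = {}
    for key, value in data.items():
        # Any key containing one of the configured fragments is replaced wholesale.
        if any(fragment in key.lower() for fragment in APP_ERROR_MASKED_KEY_HAS):
            masked[key] = APP_ERROR_MASK_WITH
        else:
            masked[key] = value
    return masked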
| 31.857143
| 49
| 0.757848
|
6e3f842840c53a0accb08f18b9886313ddd5f60d
| 2,938
|
py
|
Python
|
tests/test_get_all_info.py
|
suecharo/agodashi
|
8d063c2e9fabf881535287086d0b0edc66376d78
|
[
"Apache-2.0"
] | null | null | null |
tests/test_get_all_info.py
|
suecharo/agodashi
|
8d063c2e9fabf881535287086d0b0edc66376d78
|
[
"Apache-2.0"
] | null | null | null |
tests/test_get_all_info.py
|
suecharo/agodashi
|
8d063c2e9fabf881535287086d0b0edc66376d78
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf-8
from argparse import Namespace
from typing import Dict, Union
from flask import Flask
from flask.testing import FlaskClient
from flask.wrappers import Response
from agodashi.app import create_app, handle_default_params, parse_args
from agodashi.type import AllInformation, ErrorResponse
from .resource_list import CWL_WF, CWL_WF_PACKED, CWL_WF_REMOTE
def test_wf(delete_env_vars: None) -> None:
args: Namespace = parse_args([])
params: Dict[str, Union[str, int]] = handle_default_params(args)
app: Flask = create_app(params)
app.debug = params["debug"] # type: ignore
app.testing = True
client: FlaskClient[Response] = app.test_client()
with CWL_WF.open(mode="r") as f:
wf_content: str = f.read()
response: Response = \
client.post("/inspect-workflow",
data={"wf_content": wf_content},
content_type="multipart/form-data")
res_data: ErrorResponse = response.get_json()
print(response)
print(res_data)
assert response.status_code == 400
assert "cwltool ended with status 1" in res_data["msg"]
def test_wf_remote(delete_env_vars: None) -> None:
args: Namespace = parse_args([])
params: Dict[str, Union[str, int]] = handle_default_params(args)
app: Flask = create_app(params)
app.debug = params["debug"] # type: ignore
app.testing = True
client: FlaskClient[Response] = app.test_client()
with CWL_WF_REMOTE.open(mode="r") as f:
wf_content: str = f.read()
response: Response = \
client.post("/inspect-workflow",
data={"wf_content": wf_content},
content_type="multipart/form-data")
res_data: AllInformation = response.get_json()
print(response)
print(res_data)
assert response.status_code == 200
assert "wf_type" in res_data
assert "wf_version" in res_data
assert "wf_params" in res_data
assert "CWL" == res_data["wf_type"]
assert "v1.0" == res_data["wf_version"]
def test_wf_packed(delete_env_vars: None) -> None:
args: Namespace = parse_args([])
params: Dict[str, Union[str, int]] = handle_default_params(args)
app: Flask = create_app(params)
app.debug = params["debug"] # type: ignore
app.testing = True
client: FlaskClient[Response] = app.test_client()
with CWL_WF_PACKED.open(mode="r") as f:
wf_content: str = f.read()
response: Response = \
client.post("/inspect-workflow",
data={"wf_content": wf_content},
content_type="multipart/form-data")
res_data: AllInformation = response.get_json()
print(response)
print(res_data)
assert response.status_code == 200
assert "wf_type" in res_data
assert "wf_version" in res_data
assert "wf_params" in res_data
assert "CWL" == res_data["wf_type"]
assert "v1.0" == res_data["wf_version"]
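# --- Illustrative sketch (not part of the original tests) ---
# Outside the Flask test client, the same endpoint could be exercised against
# a running agodashi server with the `requests` package. The host and port
# are assumptions; the form field name matches the tests above.
#
# import requests
# with CWL_WF_PACKED.open(mode="r") as f:
#     res = requests.post("http://localhost:8080/inspect-workflow",
#                         files={"wf_content": f.read()})
# print(res.json())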
| 33.386364
| 70
| 0.66644
|
65b065fbdac84025e7098150d7c35db776ddddd2
| 2,390
|
py
|
Python
|
tools/db-json.py
|
marcboeren/millionnl
|
688cd401a7d3a12ceee046f3df1d403e18d89ed3
|
[
"MIT"
] | null | null | null |
tools/db-json.py
|
marcboeren/millionnl
|
688cd401a7d3a12ceee046f3df1d403e18d89ed3
|
[
"MIT"
] | null | null | null |
tools/db-json.py
|
marcboeren/millionnl
|
688cd401a7d3a12ceee046f3df1d403e18d89ed3
|
[
"MIT"
] | null | null | null |
import _mysql
import os
def esc(s):
return s.replace('"', '\\"').replace('\r\n', '\n').replace('\r', '\n').replace('\n', '\\n')
db = _mysql.connect("localhost", "REDACTED", "REDACTED", "milliondb")
items = {}
db.query("select create_date, title, image, slug, intro, extract, story from story_story where export = 1 order by create_date desc")
r = db.store_result()
for story in r.fetch_row(0):
context = { 'timestamp': esc(story[0]),
'title': esc(story[1]),
'image': esc(story[2]),
'slug': esc(story[3]),
'text': esc(story[4]),
'story': esc(story[4]+"\n\n"+story[5]+"\n\n"+story[6]),
}
items[context['timestamp']] = """{"timestamp":"%(timestamp)s",
"type":"story",
"title":"%(title)s",
"image":"%(image)s",
"slug":"%(slug)s",
"text":"%(text)s",
"count":1
}""" % context
fstory = file('json/story/'+story[3]+'.json', 'wb')
fstory.write("""{"timestamp":"%(timestamp)s",
"type":"story",
"title":"%(title)s",
"image":"%(image)s",
"slug":"%(slug)s",
"text":"%(story)s"
},
""" % context)
fstory.close()
if not os.path.exists('stories/'+context['slug']):
os.makedirs('stories/'+context['slug'])
print story[3]
db.query("select create_date, title, image, slug, extract from gallery_gallery where export = 1 order by create_date desc")
r = db.store_result()
for gallery in r.fetch_row(0):
context = { 'timestamp': esc(gallery[0]),
'title': esc(gallery[1]),
'image': esc(gallery[2]),
'slug': esc(gallery[3]),
'text': esc(gallery[4]),
}
items[context['timestamp']] = """{"timestamp":"%(timestamp)s",
"type":"gallery",
"title":"%(title)s",
"image":"%(image)s",
"slug":"%(slug)s",
"text":"%(text)s",
"count":1
}""" % context
fgallery = file('json/gallery/'+gallery[3]+'.json', 'wb')
fgallery.write("""{"timestamp":"%(timestamp)s",
"type":"gallery",
"title":"%(title)s",
"slug":"%(slug)s",
"text":"%(text)s",
"photos":[]
},
""" % context)
fgallery.close()
if not os.path.exists('galleries/'+context['slug']):
os.makedirs('galleries/'+context['slug'])
print gallery[3]
findex = file('json/index.json', 'wb')
findex.write("[\n")
findex.write(",\n".join(sorted(items.values(), reverse=True)))
findex.write("]")
findex.close()
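# --- Illustrative sketch (Python 3 alternative, not part of the original script) ---
# The hand-rolled esc() helper and string templates above predate the use of
# the json module here; the same story record could be serialised safely with:
#
# import json
# record = {"timestamp": story[0], "type": "story", "title": story[1],
#           "image": story[2], "slug": story[3], "text": story[4], "count": 1}
# items[story[0]] = json.dumps(record, ensure_ascii=False)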
| 29.875
| 133
| 0.553975
|
575ad7c13d6b679b7f8a3d0f931a61130d656d51
| 1,669
|
py
|
Python
|
tests/test_utils.py
|
jrcastro2/invenio-oauth2server
|
f74bbe1ea19656831ac94946999100b65e6fe7dd
|
[
"MIT"
] | 3
|
2015-08-19T12:51:12.000Z
|
2017-10-25T00:58:52.000Z
|
tests/test_utils.py
|
jrcastro2/invenio-oauth2server
|
f74bbe1ea19656831ac94946999100b65e6fe7dd
|
[
"MIT"
] | 157
|
2015-08-04T12:14:23.000Z
|
2021-06-02T14:59:10.000Z
|
tests/test_utils.py
|
jrcastro2/invenio-oauth2server
|
f74bbe1ea19656831ac94946999100b65e6fe7dd
|
[
"MIT"
] | 44
|
2015-08-03T17:05:27.000Z
|
2022-01-19T19:06:53.000Z
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test case for rebuilding access tokens."""
import sys
import pytest
from invenio_db import db
from invenio_oauth2server.models import Token
from invenio_oauth2server.utils import rebuild_access_tokens
def test_rebuilding_access_tokens(models_fixture):
"""Test rebuilding access tokens with random new SECRET_KEY."""
app = models_fixture
with app.app_context():
old_secret_key = app.secret_key
tokens_before = Token.query.order_by(Token.id).all()
# Changing application SECRET_KEY
app.secret_key = "NEW_SECRET_KEY"
db.session.expunge_all()
# Asserting the decoding error occurs with the stale SECRET_KEY
if sys.version_info[0] < 3: # python 2
token = Token.query.filter(Token.id == tokens_before[0].id).one()
assert token.access_token != tokens_before[0].access_token
else: # python 3
with pytest.raises(ValueError):
Token.query.filter(Token.id == tokens_before[0].id).one()
db.session.expunge_all()
rebuild_access_tokens(old_secret_key)
tokens_after = Token.query.order_by(Token.id).all()
for token_before, token_after in list(zip(tokens_before,
tokens_after)):
assert token_before.access_token == token_after.access_token
assert token_before.refresh_token == token_after.refresh_token
| 35.510638
| 77
| 0.676453
|
f23b176296c2700d1e3a7c58aa63200afa9c9fc2
| 344
|
py
|
Python
|
mixed/balanced_brackets.py
|
RaoulMa/NeuralNets
|
f49072ac88686f753f9b5815d6cc5e71d536c3d2
|
[
"MIT"
] | 1
|
2018-06-30T08:41:49.000Z
|
2018-06-30T08:41:49.000Z
|
mixed/balanced_brackets.py
|
RaoulMa/BasicNeuralNets
|
f49072ac88686f753f9b5815d6cc5e71d536c3d2
|
[
"MIT"
] | null | null | null |
mixed/balanced_brackets.py
|
RaoulMa/BasicNeuralNets
|
f49072ac88686f753f9b5815d6cc5e71d536c3d2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Balanced brackets.
"""
def is_Balanced(s):
table = {')':'(', ']':'[', '}':'{'}
stack = []
for x in s:
if stack and table.get(x) == stack[-1]:
stack.pop()
else:
stack.append(x)
print("NO" if stack else "YES")
is_Balanced('[({})]')
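# --- Illustrative additions, not in the original snippet ---
# The stack-based check runs in O(n) time and O(n) extra space: each opening
# bracket is pushed, and a closing bracket either cancels the matching opener
# on top of the stack or is pushed itself, leaving the stack non-empty.
is_Balanced('([)]')      # prints NO: interleaved pairs never cancel out
is_Balanced('{[()()]}')  # prints YES: every closer matches the current top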
| 19.111111
| 47
| 0.450581
|
e9ca6211cc9cd039dade4acaee7599b461f8da60
| 4,319
|
py
|
Python
|
pychron/envisage/consoleable.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/envisage/consoleable.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/envisage/consoleable.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
# ============= standard library imports ========================
from datetime import datetime
from traits.trait_types import Bool, Instance, Int
from traits.traits import Color
# ============= local library imports ==========================
from pychron.loggable import Loggable
from pychron.pychron_constants import LIGHT_YELLOW
class Consoleable(Loggable):
use_message_colormapping = Bool
console_display = Instance("pychron.core.displays.display.DisplayController")
# console_updated = Event
console_bgcolor = LIGHT_YELLOW
console_fontsize = Int(11)
console_default_color = Color("black")
def console_bind_preferences(self, prefid):
from pychron.core.ui.preference_binding import (
color_bind_preference,
bind_preference,
)
color_bind_preference(self, "console_bgcolor", "{}.bgcolor".format(prefid))
color_bind_preference(
self, "console_default_color", "{}.textcolor".format(prefid)
)
bind_preference(self, "console_fontsize", "{}.fontsize".format(prefid))
def console_set_preferences(self, preferences, prefid):
from pychron.core.ui.preference_binding import (
set_preference,
color_set_preference,
)
color_set_preference(
preferences, self, "console_bgcolor", "{}.bg_color".format(prefid)
)
color_set_preference(
preferences, self, "console_default_color", "{}.textcolor".format(prefid)
)
set_preference(
preferences,
self,
"console_fontsize",
"{}.fontsize".format(prefid),
cast=int,
)
def warning(self, msg, log=True, color=None, *args, **kw):
super(Consoleable, self).warning(msg, *args, **kw)
if color is None:
color = "red"
msg = msg.upper()
if self.console_display:
self.console_display.add_text(msg, color=color)
# self.console_updated = '{}|{}'.format(color, msg)
def heading(self, msg, decorate_chr="*", *args, **kw):
d = decorate_chr * 7
msg = "{} {} {}".format(d, msg, d)
self.info(msg)
def info(self, msg, log=True, color=None, *args, **kw):
if color is None: # or not self.use_message_colormapping:
color = self.console_default_color
if self.console_display:
t = datetime.now().strftime("%H:%M:%S")
msg = "{} -- {}".format(t, msg)
self.console_display.add_text(msg, color=color)
if log:
super(Consoleable, self).info(msg, *args, **kw)
# self.console_updated = '{}|{}'.format(color, msg)
def info_marker(self, char="=", color=None):
if color is None:
color = self.console_default_color
if self.console_display:
self.console_display.add_marker(char, color=color)
def info_heading(self, msg):
self.info("")
self.info_marker("=")
self.info(msg)
self.info_marker("=")
self.info("")
def _console_display_default(self):
from pychron.core.displays.display import DisplayController
return DisplayController(
bgcolor=self.console_bgcolor,
font_size=self.console_fontsize,
default_color=self.console_default_color,
max_blocks=100,
)
# ============= EOF =============================================
| 33.742188
| 85
| 0.590646
|
79ecdf459685b43a61d528c30dcec15ae6fdc484
| 2,864
|
py
|
Python
|
kgr/conv_e.py
|
dertilo/knowledge-graph-reasoning
|
e36d57ee34aa2b532f4dfa98a1e1d222037337cc
|
[
"MIT"
] | null | null | null |
kgr/conv_e.py
|
dertilo/knowledge-graph-reasoning
|
e36d57ee34aa2b532f4dfa98a1e1d222037337cc
|
[
"MIT"
] | null | null | null |
kgr/conv_e.py
|
dertilo/knowledge-graph-reasoning
|
e36d57ee34aa2b532f4dfa98a1e1d222037337cc
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
import torch
import torch.nn as nn
import torch.nn.functional as F
@dataclass
class Config:
entity_dim:int=200
relation_dim:int=200
emb_dropout_rate:float=0.3
hidden_dropout_rate:float=0.3
feat_dropout_rate:float=0.2
emb_2D_d1:int=10
emb_2D_d2:int=20
num_out_channels:int=32
kernel_size:int=3
class ConvE(nn.Module):
def __init__(self, config:Config, num_entities, num_relations)-> None:
super().__init__()
entity_dim = config.entity_dim
emb_dropout_rate = config.emb_dropout_rate
assert config.emb_2D_d1 * config.emb_2D_d2 == entity_dim
assert config.emb_2D_d1 * config.emb_2D_d2 == config.relation_dim
self.emb_2D_d1 = config.emb_2D_d1
self.emb_2D_d2 = config.emb_2D_d2
self.num_out_channels = config.num_out_channels
self.w_d = config.kernel_size
self.HiddenDropout = nn.Dropout(config.hidden_dropout_rate)
self.FeatureDropout = nn.Dropout(config.feat_dropout_rate)
# stride = 1, padding = 0, dilation = 1, groups = 1
self.conv1 = nn.Conv2d(1, self.num_out_channels, (self.w_d, self.w_d), 1, 0)
self.bn0 = nn.BatchNorm2d(1)
self.bn1 = nn.BatchNorm2d(self.num_out_channels)
self.bn2 = nn.BatchNorm1d(entity_dim)
self.register_parameter("b", nn.Parameter(torch.zeros(num_entities)))
h_out = 2 * self.emb_2D_d1 - self.w_d + 1
w_out = self.emb_2D_d2 - self.w_d + 1
self.feat_dim = self.num_out_channels * h_out * w_out
self.fc = nn.Linear(self.feat_dim, entity_dim)
self.entity_embeddings = nn.Embedding(num_entities, entity_dim)
self.EDropout = nn.Dropout(emb_dropout_rate)
self.relation_embeddings = nn.Embedding(num_relations, config.relation_dim)
self.RDropout = nn.Dropout(emb_dropout_rate)
self.initialize_modules()
def initialize_modules(self):
nn.init.xavier_normal_(self.entity_embeddings.weight)
nn.init.xavier_normal_(self.relation_embeddings.weight)
def forward(self, e1, r):
E1 = self.EDropout(self.entity_embeddings(e1)).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)
R = self.RDropout(self.relation_embeddings(r)).view(-1, 1, self.emb_2D_d1, self.emb_2D_d2)
all_embeddings = self.EDropout(self.entity_embeddings.weight)
stacked_inputs = torch.cat([E1, R], 2)
stacked_inputs = self.bn0(stacked_inputs)
X = self.conv1(stacked_inputs)
# X = self.bn1(X)
X = F.relu(X)
X = self.FeatureDropout(X)
X = X.view(-1, self.feat_dim)
X = self.fc(X)
X = self.HiddenDropout(X)
X = self.bn2(X)
X = F.relu(X)
X = torch.mm(X, all_embeddings.transpose(1, 0))
X += self.b.expand_as(X)
S = F.sigmoid(X)
return S
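# A rough usage sketch; the entity/relation counts below are illustrative
# assumptions, not values from the original module. It scores every entity for
# a small batch of (entity, relation) queries:
#
#   model = ConvE(Config(), num_entities=100, num_relations=20)
#   model.eval()  # avoid BatchNorm training-mode statistics for a quick check
#   e1 = torch.tensor([3, 7])
#   r = torch.tensor([1, 0])
#   scores = model(e1, r)  # shape (2, 100); sigmoid outputs in (0, 1)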
| 36.717949
| 98
| 0.665503
|
03d7d2607d5528bcab473b36b752f8c06fe8f1b8
| 2,162
|
py
|
Python
|
pins/get_pins2.py
|
lpe234/juejin_pins
|
498d6689a62f8f8dc21d307ab00b70507676eabc
|
[
"MulanPSL-1.0"
] | 1
|
2020-09-02T04:48:13.000Z
|
2020-09-02T04:48:13.000Z
|
pins/get_pins2.py
|
lpe234/juejin_pins
|
498d6689a62f8f8dc21d307ab00b70507676eabc
|
[
"MulanPSL-1.0"
] | null | null | null |
pins/get_pins2.py
|
lpe234/juejin_pins
|
498d6689a62f8f8dc21d307ab00b70507676eabc
|
[
"MulanPSL-1.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os
import time
import logging
import requests
__author__ = 'lpe234'
sess = requests.Session()
sess.headers = {
'accept': '*/*',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8',
'cache-control': 'no-cache',
'content-type': 'application/json',
'origin': 'https://juejin.im',
'pragma': 'no-cache',
'referer': 'https://juejin.im/pins/hot',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-site',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36',
}
# Hot pins API URL
HOT_URL = 'https://apinew.juejin.im/recommend_api/v1/short_msg/hot'
# Directory where the JSON data is saved
DATA_PATH = 'json_data'
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s', )
def save_pins(idx=0, cursor='0'):
"""
存储沸点数据
:param idx: 索引
:param cursor: 游标指针
:return:
"""
json_data = {
'id_type': 4,
'sort_type': 200,
'cursor': cursor,
'limit': 200
}
resp = sess.post(HOT_URL, json=json_data)
if resp.ok:
resp_json = resp.json()
with open(f'{DATA_PATH}/pins-{idx:04}.json', 'w+') as json_file:
json_file.write(resp.content.decode('UTF-8'))
        # Check whether more pages are available
if resp_json['err_no'] == 0 and resp_json['err_msg'] == 'success':
logging.debug(f'no error, idx={idx}')
if resp_json['has_more']:
logging.debug(f'has more, next idx={idx+1}')
time.sleep(5)
save_pins(idx+1, cursor=resp_json['cursor'])
else:
            # The API returned an error; wait and retry
logging.warning(resp_json['err_msg'])
logging.debug(f'sleep 10s, retry idx={idx}')
time.sleep(10)
save_pins(idx, cursor)
def check_path():
"""
    Verify that DATA_PATH exists, creating it if necessary.
:return:
"""
logging.debug('check data_path')
if not os.path.exists(DATA_PATH):
os.mkdir(DATA_PATH, mode=0o744)
if __name__ == '__main__':
check_path()
save_pins()
| 25.738095
| 142
| 0.571693
|
2d41302beaf3fc0291f260c333b40820ff3e43dd
| 456
|
py
|
Python
|
find_max_occurence_sentence.py
|
swatmantis/my-pyscripts
|
e16af5879b101c30e34e82727292849d1d33f440
|
[
"Apache-2.0"
] | null | null | null |
find_max_occurence_sentence.py
|
swatmantis/my-pyscripts
|
e16af5879b101c30e34e82727292849d1d33f440
|
[
"Apache-2.0"
] | null | null | null |
find_max_occurence_sentence.py
|
swatmantis/my-pyscripts
|
e16af5879b101c30e34e82727292849d1d33f440
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Program to find the character that repeats the most
from pprint import pprint
sentence = "This is a common inteview question"
occurence = {}
for char in sentence:
if char not in occurence:
occurence[char] = sentence.count(char)
# pprint(occurence, width=1)
occurence_sorted = sorted(occurence.items(), key=lambda kv: kv[1], reverse=True)
pprint(occurence_sorted)
print("max_frequency_char =", occurence_sorted[0])
| 25.333333
| 80
| 0.743421
|
cb2a1216e102e752f9497d4825db483d5ac78178
| 3,593
|
py
|
Python
|
haweb/apps/core/admin.py
|
edilio/tobeawebproperty
|
317205bf27ab76a430ea56a474e1739ee71f164e
|
[
"MIT"
] | null | null | null |
haweb/apps/core/admin.py
|
edilio/tobeawebproperty
|
317205bf27ab76a430ea56a474e1739ee71f164e
|
[
"MIT"
] | 4
|
2015-01-02T21:39:58.000Z
|
2015-06-23T02:18:57.000Z
|
haweb/apps/core/admin.py
|
edilio/tobeawebproperty
|
317205bf27ab76a430ea56a474e1739ee71f164e
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django import forms
from .models import UserProfile, City, ZipCode, Unit, Tenant, Contract
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
class UserCreationForm(forms.ModelForm):
"""A form for creating new users. Includes all the required
fields, plus a repeated password."""
password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
class Meta:
model = UserProfile
fields = ('email', )
def clean_password2(self):
# Check that the two password entries match
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError("Passwords don't match")
return password2
def save(self, commit=True):
# Save the provided password in hashed format
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
"""A form for updating users. Includes all the fields on
the user, but replaces the password field with admin's
password hash display field.
"""
password = ReadOnlyPasswordHashField()
class Meta:
model = UserProfile
fields = '__all__'
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class UserProfileAdmin(UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
model = UserProfile
list_display = ('email', 'first_name', 'last_name', 'is_staff')
ordering = ('email',)
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
(_('Photo'), {'fields': ('photo', )}),
(_('Title'), {'fields': ('title', )}),
(_('Phone'), {'fields': ('phone', )}),
)
@admin.register(City)
class CityAdmin(admin.ModelAdmin):
list_display = ('name', )
search_fields = ('name', )
@admin.register(ZipCode)
class ZipCodeAdmin(admin.ModelAdmin):
list_display = ('city', 'state', 'zip_code')
list_filter = ('state', 'city')
search_fields = ('city__name', 'zip_code')
@admin.register(Tenant)
class TenantAdmin(admin.ModelAdmin):
list_display = ('tenant_id', 'order_on', 'cell_phone', 'home_phone', 'work_phone')
search_fields = ('tenant_id', 'cell_phone', 'home_phone', 'work_phone', 'first_name', 'last_name')
@admin.register(Unit)
class UnitAdmin(admin.ModelAdmin):
list_display = ('unit_id', 'address', 'apartment', 'zip_code')
search_fields = ('unit_id', 'address', 'apartment')
list_filter = ('zip_code', )
@admin.register(Contract)
class ContractAdmin(admin.ModelAdmin):
list_display = ('tenant', 'unit', 'first_day', 'last_day')
admin.site.register(UserProfile, UserProfileAdmin)
| 33.579439
| 102
| 0.660729
|
da45ea7439d6b4503c8a8d771541f61bca610e8d
| 886
|
py
|
Python
|
hopper/utils.py
|
MSAdministrator/hopper
|
d25666e0957641d8e099d6aaf4fe346350dac869
|
[
"MIT"
] | 2
|
2020-10-27T04:08:21.000Z
|
2021-01-10T06:57:58.000Z
|
hopper/utils.py
|
MSAdministrator/hopper
|
d25666e0957641d8e099d6aaf4fe346350dac869
|
[
"MIT"
] | null | null | null |
hopper/utils.py
|
MSAdministrator/hopper
|
d25666e0957641d8e099d6aaf4fe346350dac869
|
[
"MIT"
] | null | null | null |
import sys
from email.header import decode_header, make_header
def python_version_greater_than_three():
return sys.version_info > (3, 0)
def cleanup_text(text):
"""
normalizes newline/tab chars, strips whitespace, removes newline chars from the ends.
"""
text = normalize_newlinechar(text)
text = normalize_tabchar(text)
return text.strip()
def normalize_newlinechar(text):
return text.replace("\\n", "\n")
def normalize_tabchar(text):
return text.replace("\\t", "\t")
def decode_and_convert_to_unicode(text):
if not text:
return ''
try:
header = make_header(decode_header(text))
if python_version_greater_than_three():
return str(header)
else:
return header
except Exception:
# Illegal encoding sequence used in the email header, return as is
return text
| 23.315789
| 89
| 0.670429
|
4c693538cb5b0a77a44ca199e48d6cfad7f361d1
| 18,057
|
py
|
Python
|
nova/tests/functional/test_aggregates.py
|
bopopescu/nova-rocky-system-reader-role
|
50a9d96f117b3c90aec214d1732f63fc6a1b98ea
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/functional/test_aggregates.py
|
bopopescu/nova-rocky-system-reader-role
|
50a9d96f117b3c90aec214d1732f63fc6a1b98ea
|
[
"Apache-2.0"
] | 2
|
2021-03-31T19:25:14.000Z
|
2021-12-13T20:15:06.000Z
|
nova/tests/functional/test_aggregates.py
|
bopopescu/nova-rocky-system-reader-role
|
50a9d96f117b3c90aec214d1732f63fc6a1b98ea
|
[
"Apache-2.0"
] | 1
|
2020-07-22T22:15:29.000Z
|
2020-07-22T22:15:29.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from nova.scheduler.client import report
import nova.conf
from nova import context as nova_context
from nova.scheduler import weights
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
from nova.virt import fake
CONF = nova.conf.CONF
class AggregatesTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
ADMIN_API = True
def _add_hosts_to_aggregate(self):
"""List all compute services and add them all to an aggregate."""
compute_services = [s for s in self.api.get_services()
if s['binary'] == 'nova-compute']
agg = {'aggregate': {'name': 'test-aggregate'}}
agg = self.api.post_aggregate(agg)
for service in compute_services:
self.api.add_host_to_aggregate(agg['id'], service['host'])
return len(compute_services)
def test_add_hosts(self):
# Default case with one compute, mapped for us
self.assertEqual(1, self._add_hosts_to_aggregate())
def test_add_unmapped_host(self):
"""Ensure that hosts without mappings are still found and added"""
# Add another compute, but nuke its HostMapping
self.start_service('compute', host='compute2')
self.host_mappings['compute2'].destroy()
self.assertEqual(2, self._add_hosts_to_aggregate())
class AggregateRequestFiltersTest(test.TestCase,
integrated_helpers.InstanceHelperMixin):
microversion = 'latest'
compute_driver = 'fake.MediumFakeDriver'
def setUp(self):
self.flags(compute_driver=self.compute_driver)
super(AggregateRequestFiltersTest, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.AllServicesCurrent())
placement = self.useFixture(nova_fixtures.PlacementFixture())
self.placement_api = placement.api
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.admin_api = api_fixture.admin_api
self.admin_api.microversion = self.microversion
self.api = self.admin_api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.start_service('conductor')
self.scheduler_service = self.start_service('scheduler')
self.computes = {}
self.aggregates = {}
self._start_compute('host1')
self._start_compute('host2')
self.context = nova_context.get_admin_context()
self.report_client = report.SchedulerReportClient()
self.flavors = self.api.get_flavors()
# Aggregate with only host1
self._create_aggregate('only-host1')
self._add_host_to_aggregate('only-host1', 'host1')
# Aggregate with only host2
self._create_aggregate('only-host2')
self._add_host_to_aggregate('only-host2', 'host2')
# Aggregate with neither host
self._create_aggregate('no-hosts')
def _start_compute(self, host):
"""Start a nova compute service on the given host
:param host: the name of the host that will be associated to the
compute service.
:return: the nova compute service object
"""
fake.set_nodes([host])
self.addCleanup(fake.restore_nodes)
compute = self.start_service('compute', host=host)
self.computes[host] = compute
return compute
def _create_aggregate(self, name):
agg = self.admin_api.post_aggregate({'aggregate': {'name': name}})
self.aggregates[name] = agg
def _get_provider_uuid_by_host(self, host):
"""Return the compute node uuid for a named compute host."""
# NOTE(gibi): the compute node id is the same as the compute node
# provider uuid on that compute
resp = self.admin_api.api_get(
'os-hypervisors?hypervisor_hostname_pattern=%s' % host).body
return resp['hypervisors'][0]['id']
def _add_host_to_aggregate(self, agg, host):
"""Add a compute host to both nova and placement aggregates.
:param agg: Name of the nova aggregate
:param host: Name of the compute host
"""
agg = self.aggregates[agg]
self.admin_api.add_host_to_aggregate(agg['id'], host)
host_uuid = self._get_provider_uuid_by_host(host)
# Make sure we have a view of the provider we're about to mess with
# FIXME(efried): This should be a thing we can do without internals
self.report_client._ensure_resource_provider(
self.context, host_uuid, name=host)
self.report_client.aggregate_add_host(self.context, agg['uuid'], host)
def _wait_for_state_change(self, server, from_status):
for i in range(0, 50):
server = self.api.get_server(server['id'])
if server['status'] != from_status:
break
time.sleep(.1)
return server
def _boot_server(self, az=None):
server_req = self._build_minimal_create_server_request(
self.api, 'test-instance', flavor_id=self.flavors[0]['id'],
image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6',
networks='none', az=az)
created_server = self.api.post_server({'server': server_req})
server = self._wait_for_state_change(created_server, 'BUILD')
return server
def _get_instance_host(self, server):
srv = self.admin_api.get_server(server['id'])
return srv['OS-EXT-SRV-ATTR:host']
def _set_az_aggregate(self, agg, az):
"""Set the availability_zone of an aggregate
:param agg: Name of the nova aggregate
:param az: Availability zone name
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'availability_zone': az,
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def _set_metadata(self, agg, metadata):
"""POST /os-aggregates/{aggregate_id}/action (set_metadata)
:param agg: Name of the nova aggregate
:param metadata: dict of aggregate metadata key/value pairs to add,
update, or remove if value=None (note "availability_zone" cannot be
nulled out once set).
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': metadata
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
def _grant_tenant_aggregate(self, agg, tenants):
"""Grant a set of tenants access to use an aggregate.
:param agg: Name of the nova aggregate
:param tenants: A list of all tenant ids that will be allowed access
"""
agg = self.aggregates[agg]
action = {
'set_metadata': {
'metadata': {
'filter_tenant_id%i' % i: tenant
for i, tenant in enumerate(tenants)
}
},
}
self.admin_api.post_aggregate_action(agg['id'], action)
class AggregatePostTest(AggregateRequestFiltersTest):
def test_set_az_for_aggreate_no_instances(self):
"""Should be possible to update AZ for an empty aggregate.
Check you can change the AZ name of an aggregate when it does
not contain any servers.
"""
self._set_az_aggregate('only-host1', 'fake-az')
def test_rename_to_same_az(self):
"""AZ rename should pass successfully if AZ name is not changed"""
az = 'fake-az'
self._set_az_aggregate('only-host1', az)
self._boot_server(az=az)
self._set_az_aggregate('only-host1', az)
def test_fail_set_az(self):
"""Check it is not possible to update a non-empty aggregate.
Check you cannot change the AZ name of an aggregate when it
contains any servers.
"""
az = 'fake-az'
self._set_az_aggregate('only-host1', az)
server = self._boot_server(az=az)
self.assertRaisesRegex(
client.OpenStackApiException,
'One or more hosts contain instances in this zone.',
self._set_az_aggregate, 'only-host1', 'new' + az)
# Configure for the SOFT_DELETED scenario.
self.flags(reclaim_instance_interval=300)
self.api.delete_server(server['id'])
server = self._wait_for_state_change(server, from_status='ACTIVE')
self.assertEqual('SOFT_DELETED', server['status'])
self.assertRaisesRegex(
client.OpenStackApiException,
'One or more hosts contain instances in this zone.',
self._set_az_aggregate, 'only-host1', 'new' + az)
# Force delete the SOFT_DELETED server.
self.api.api_post(
'/servers/%s/action' % server['id'], {'forceDelete': None})
# Wait for it to be deleted since forceDelete is asynchronous.
self._wait_until_deleted(server)
# Now we can rename the AZ since the server is gone.
self._set_az_aggregate('only-host1', 'new' + az)
def test_cannot_delete_az(self):
az = 'fake-az'
# Assign the AZ to the aggregate.
self._set_az_aggregate('only-host1', az)
# Set some metadata on the aggregate; note the "availability_zone"
# metadata key is not specified.
self._set_metadata('only-host1', {'foo': 'bar'})
# Verify the AZ was retained.
agg = self.admin_api.api_get(
'/os-aggregates/%s' %
self.aggregates['only-host1']['id']).body['aggregate']
self.assertEqual(az, agg['availability_zone'])
# NOTE: this test case has the same test methods as AggregatePostTest
# but for the AZ update it uses PUT /os-aggregates/{aggregate_id} method
class AggregatePutTest(AggregatePostTest):
def _set_az_aggregate(self, agg, az):
"""Set the availability_zone of an aggregate via PUT
:param agg: Name of the nova aggregate
:param az: Availability zone name
"""
agg = self.aggregates[agg]
body = {
'aggregate': {
'availability_zone': az,
},
}
self.admin_api.put_aggregate(agg['id'], body)
class TenantAggregateFilterTest(AggregateRequestFiltersTest):
def setUp(self):
super(TenantAggregateFilterTest, self).setUp()
# Default to enabling the filter and making it mandatory
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
def test_tenant_id_required_fails_if_no_aggregate(self):
server = self._boot_server()
# Without granting our tenant permission to an aggregate, instance
# creates should fail since aggregates are required
self.assertEqual('ERROR', server['status'])
def test_tenant_id_not_required_succeeds_if_no_aggregate(self):
self.flags(placement_aggregate_required_for_tenants=False,
group='scheduler')
server = self._boot_server()
# Without granting our tenant permission to an aggregate, instance
# creates should still succeed since aggregates are not required
self.assertEqual('ACTIVE', server['status'])
def test_filter_honors_tenant_id(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with only host1 in it
# and boot some servers. They should all stack up on host1.
self._grant_tenant_aggregate('only-host1',
['foo', tenant, 'bar'])
server1 = self._boot_server()
server2 = self._boot_server()
self.assertEqual('ACTIVE', server1['status'])
self.assertEqual('ACTIVE', server2['status'])
# Grant our tenant access to the aggregate with only host2 in it
# and boot some servers. They should all stack up on host2.
self._grant_tenant_aggregate('only-host1',
['foo', 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
server3 = self._boot_server()
server4 = self._boot_server()
self.assertEqual('ACTIVE', server3['status'])
self.assertEqual('ACTIVE', server4['status'])
# Make sure the servers landed on the hosts we had access to at
# the time we booted them.
hosts = [self._get_instance_host(s)
for s in (server1, server2, server3, server4)]
expected_hosts = ['host1', 'host1', 'host2', 'host2']
self.assertEqual(expected_hosts, hosts)
def test_filter_with_empty_aggregate(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
server = self._boot_server()
self.assertEqual('ERROR', server['status'])
def test_filter_with_multiple_aggregates_for_tenant(self):
tenant = self.api.project_id
# Grant our tenant access to the aggregate with no hosts in it,
# and one with a host.
self._grant_tenant_aggregate('no-hosts',
['foo', tenant, 'bar'])
self._grant_tenant_aggregate('only-host2',
['foo', tenant, 'bar'])
# Boot several servers and make sure they all land on the
# only host we have access to.
for i in range(0, 4):
server = self._boot_server()
self.assertEqual('ACTIVE', server['status'])
self.assertEqual('host2', self._get_instance_host(server))
class HostNameWeigher(weights.BaseHostWeigher):
def _weigh_object(self, host_state, weight_properties):
"""Arbitrary preferring host1 over host2 over host3."""
weights = {'host1': 100, 'host2': 50, 'host3': 1}
return weights.get(host_state.host, 0)
class AvailabilityZoneFilterTest(AggregateRequestFiltersTest):
def setUp(self):
# Default to enabling the filter
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
# Use our custom weigher defined above to make sure that we have
# a predictable scheduling sort order.
self.flags(weight_classes=[__name__ + '.HostNameWeigher'],
group='filter_scheduler')
# NOTE(danms): Do this before calling setUp() so that
# the scheduler service that is started sees the new value
filters = CONF.filter_scheduler.enabled_filters
filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(AvailabilityZoneFilterTest, self).setUp()
def test_filter_with_az(self):
self._set_az_aggregate('only-host2', 'myaz')
server1 = self._boot_server(az='myaz')
server2 = self._boot_server(az='myaz')
hosts = [self._get_instance_host(s) for s in (server1, server2)]
self.assertEqual(['host2', 'host2'], hosts)
class TestAggregateFiltersTogether(AggregateRequestFiltersTest):
def setUp(self):
# NOTE(danms): Do this before calling setUp() so that
# the scheduler service that is started sees the new value
filters = CONF.filter_scheduler.enabled_filters
filters.remove('AvailabilityZoneFilter')
self.flags(enabled_filters=filters, group='filter_scheduler')
super(TestAggregateFiltersTogether, self).setUp()
# Default to enabling both filters
self.flags(limit_tenants_to_placement_aggregate=True,
group='scheduler')
self.flags(placement_aggregate_required_for_tenants=True,
group='scheduler')
self.flags(query_placement_for_availability_zone=True,
group='scheduler')
def test_tenant_with_az_match(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host1
self._set_az_aggregate('only-host1', 'myaz')
# Boot the server into that az and make sure we land
server = self._boot_server(az='myaz')
self.assertEqual('host1', self._get_instance_host(server))
def test_tenant_with_az_mismatch(self):
# Grant our tenant access to the aggregate with
# host1
self._grant_tenant_aggregate('only-host1',
[self.api.project_id])
# Set an az on only-host2
self._set_az_aggregate('only-host2', 'myaz')
# Boot the server into that az and make sure we fail
server = self._boot_server(az='myaz')
self.assertIsNone(self._get_instance_host(server))
server = self.api.get_server(server['id'])
self.assertEqual('ERROR', server['status'])
| 38.915948
| 79
| 0.639641
|
d71e3f9430d789d21d9f39234d5de2b6b8857600
| 2,451
|
py
|
Python
|
examples/tensorflow/image-classifier-resnet50/predictor.py
|
gvvynplaine/cortex
|
aa3daf0d138a880df29a2c075af41176119da47f
|
[
"Apache-2.0"
] | 1
|
2020-08-07T10:26:01.000Z
|
2020-08-07T10:26:01.000Z
|
examples/tensorflow/image-classifier-resnet50/predictor.py
|
gvvynplaine/cortex
|
aa3daf0d138a880df29a2c075af41176119da47f
|
[
"Apache-2.0"
] | null | null | null |
examples/tensorflow/image-classifier-resnet50/predictor.py
|
gvvynplaine/cortex
|
aa3daf0d138a880df29a2c075af41176119da47f
|
[
"Apache-2.0"
] | null | null | null |
# WARNING: you are on the master branch, please refer to the examples on the branch that matches your `cortex version`
import os
import cv2
import numpy as np
import requests
import json
import base64
def get_url_image(url_image):
"""
    Fetch an image from a URL and return it as a numpy array.
"""
resp = requests.get(url_image, stream=True).raw
image = np.asarray(bytearray(resp.read()), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def decode_images(images):
"""
Decodes the images from the payload.
"""
output = []
for image in images:
img = base64.b64decode(image)
jpg_as_np = np.frombuffer(img, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR)
output.append(img)
return output
def prepare_images(images, input_shape, input_key):
"""
Prepares images for the TFS client.
"""
output = []
for image in images:
img = cv2.resize(image, input_shape, interpolation=cv2.INTER_NEAREST)
img = {input_key: img[np.newaxis, ...]}
output.append(img)
return output
class TensorFlowPredictor:
def __init__(self, tensorflow_client, config):
self.client = tensorflow_client
# load classes
classes = requests.get(config["classes"]).json()
self.idx2label = [classes[str(k)][1] for k in range(len(classes))]
self.input_shape = tuple(config["input_shape"])
self.input_key = str(config["input_key"])
def predict(self, payload):
# preprocess image
payload_keys = payload.keys()
if "imgs" in payload_keys:
imgs = payload["imgs"]
imgs = decode_images(imgs)
elif "url" in payload_keys:
imgs = [get_url_image(payload["url"])]
else:
return None
prepared_imgs = prepare_images(imgs, self.input_shape, self.input_key)
# batch sized images
top5_list_imgs = []
for img in prepared_imgs:
# predict
results = self.client.predict(img)["output"]
results = np.argsort(results)
# Lookup and print the top 5 labels
top5_idx = results[-5:]
top5_labels = [self.idx2label[idx] for idx in top5_idx]
top5_labels = top5_labels[::-1]
top5_list_imgs.append(top5_labels)
return top5_list_imgs
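# A minimal payload sketch; the keys match predict() above, while the URL is an
# illustrative placeholder rather than a value from the original example:
#
#   payload = {"url": "https://example.com/cat.jpg"}
#   predictor.predict(payload)  # -> one top-5 label list per input image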
| 28.5
| 118
| 0.627091
|
a14c2f94273c358944b5637bd7a0c09667d983f3
| 7,317
|
py
|
Python
|
grid_royale/vectoring.py
|
snakile/grid_royale
|
890486bc6e7dbf5d8835981d9bc5b97d651e1f32
|
[
"MIT"
] | null | null | null |
grid_royale/vectoring.py
|
snakile/grid_royale
|
890486bc6e7dbf5d8835981d9bc5b97d651e1f32
|
[
"MIT"
] | null | null | null |
grid_royale/vectoring.py
|
snakile/grid_royale
|
890486bc6e7dbf5d8835981d9bc5b97d651e1f32
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Ram Rachum and collaborators.
# This program is distributed under the MIT license.
from __future__ import annotations
import dataclasses
import itertools
import operator
import functools
import math
from typing import (Optional, Tuple, Union, Container, Hashable, Iterator,
Iterable, Any, Dict, FrozenSet)
import numpy as np
from . import base
@dataclasses.dataclass(frozen=True)
class Vector:
'''A vector in 2 dimensions, e.g. Vector(-2, 3)'''
x: int
y: int
__iter__ = lambda self: iter((self.x, self.y))
def __matmul__(self, other: Vector) -> int:
'''Get Levenshtein distance between two vectors'''
return sum(map(abs, map(operator.sub, self, other)))
def __neg__(self) -> Vector:
return type(self)(x=-self.x, y=-self.y)
__bool__ = lambda self: any(self)
def rotate_in_board(self, n_right_angles: int, /, board_size: int) -> Vector:
x, y = self
for _ in range(n_right_angles % 4):
(x, y) = (board_size - 1 - y, x)
return type(self)(x, y)
def iterate_rotations_in_board(self, /, board_size: int) -> Iterator[Vector]:
x, y = self
yield self
for _ in range(3):
(x, y) = (board_size - 1 - y, x)
yield type(self)(x, y)
def __hash__(self):
return hash((type(self), *self))
def __eq__(self, other: Any):
return (type(self) is type(other)) and (tuple(self) == tuple(other))
class Position(Vector):
@staticmethod
def iterate_all(state_or_board_size: Union[base._BaseGrid, int], /):
'''Iterate over all positions in a board of the given size.'''
board_size: int = (state_or_board_size if isinstance(state_or_board_size, int)
else state_or_board_size.board_size)
for y, x in itertools.product(range(board_size), repeat=2):
yield Position(x, y)
def __sub__(self, other: Union[Position, Translation]):
if isinstance(other, Position):
result_type = Translation
else:
assert isinstance(other, Translation)
result_type = Position
return result_type(self.x - other.x, self.y - other.y)
def __add__(self, translation: Translation):
assert isinstance(translation, Translation)
return Position(self.x + translation.x,
self.y + translation.y)
def in_square(self, square_size: int) -> bool:
return ((0 <= self.x <= square_size - 1) and
(0 <= self.y <= square_size - 1))
def walk(self, translation: Translation,
board_size: Optional[int] = None) -> Iterator[Position]:
position = self
if board_size is None:
while True:
yield position
position += translation
else:
while 0 <= min(position) <= max(position) <= board_size - 1:
yield position
position += translation
@functools.lru_cache(maxsize=None)
def field_of_view(self, vicinity: Vicinity, board_size: int) -> Tuple[FrozenSet[Position]]:
result = []
count_from_one = itertools.count(start=1)
if vicinity._is_step_like():
for i in count_from_one:
positions = frozenset(
position for j in range(-i, i + 1) if (position := (self +
i * vicinity + j * Step(*vicinity).perpendiculars[0])).in_square(board_size)
)
if not positions:
return tuple(result)
result.append(positions)
else:
for i in count_from_one:
positions = frozenset(
position for j in range(i + 1) if (position := (
self + Translation(j * vicinity.x, (i - j) * vicinity.y))).in_square(board_size)
)
if not positions:
return tuple(result)
result.append(positions)
def horizontal_line_in_board(self, board_size: int) -> Tuple[Position]:
return tuple(Position(i, self.y) for i in range(board_size))
def vertical_line_in_board(self, board_size: int) -> Tuple[Position]:
return tuple(Position(self.x, i) for i in range(board_size))
PositionLike = Union[Position, Tuple[int, int]]
class Translation(Vector):
'''A Translation (i.e. movement) in 2-dimensional space.'''
def __mul__(self, number: int) -> Translation:
assert isinstance(number, int)
return type(self)(x=(self.x * number), y=(self.y * number))
def __rmul__(self, number: int) -> Translation:
return self * number
def _is_step_like(self):
return tuple(self).count(0) == 1
class Vicinity(Translation):
'''A movement like a king in chess, i.e. 8 possible positions around the origin.'''
all_vicinities: Tuple[Vicinity]
def __init__(self, x: int, y: int):
assert {0} != set((x, y)) <= {-1, 0, 1}
super().__init__(x=x, y=y)
def __mul__(self, number: int) -> Translation:
assert isinstance(number, int)
return Translation(x=(self.x * number), y=(self.y * number))
Vicinity.all_vicinities = tuple(
itertools.starmap(
Vicinity,
filter(
any,
itertools.product((-1, 0, 1), repeat=2)
)
)
)
class Step(Vicinity):
_all_ascii = '↑→↓←'
def __init__(self, x: int, y: int):
assert frozenset((x, y)) in {frozenset((0, 1)),
frozenset((0, -1))}
super().__init__(x=x, y=y)
@property
def index(self):
try:
return self._index
except AttributeError:
self._index = tuple(Step.all_steps).index(self)
return self._index
@property
def name(self) -> str:
try:
return self._name
except AttributeError:
self._name = ('up', 'right', 'down', 'left')[self.index]
return self._name
@property
def ascii(self) -> str:
try:
return self._ascii
except AttributeError:
self._ascii = Step._all_ascii[tuple(Step.all_steps).index(self)]
return self._ascii
def is_general_direction(self, source: Position, target: Position) -> bool:
translation: Translation = target - source
if self.x:
return ((np.sign(translation.x) == self.x) and
abs(translation.x) >= abs(translation.y))
else:
assert self.y
return ((np.sign(translation.y) == self.y) and
abs(translation.y) >= abs(translation.x))
@property
def perpendiculars(self) -> Tuple[Step]:
try:
return self._perpendiculars
except AttributeError:
self._perpendiculars = tuple(
sorted(
((first := type(self)(*reversed(tuple(self)))), -first),
key=tuple
)
)
return self._perpendiculars
@property
def angle_from_top(self):
return math.tau * (self.index / 4)
(Step.up, Step.right, Step.down, Step.left) = Step.all_steps = (
Step(0, -1), Step(1, 0), Step(0, 1), Step(-1, 0)
)
| 30.235537
| 100
| 0.572776
|
3739c82b2d5241bd202fdda138903782e89dc175
| 412
|
py
|
Python
|
packages/python/plotly/plotly/validators/histogram2d/_showlegend.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/_showlegend.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/histogram2d/_showlegend.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="showlegend", parent_name="histogram2d", **kwargs):
super(ShowlegendValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
| 34.333333
| 86
| 0.684466
|