max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
app/mongo/data/collect/models/model.py | jgphilpott/polyMaker | 0 | 12771651 | class Model():
    def __init__(self, model):
        """Wrap a raw model object. Currently a stub that stores nothing -- TODO implement."""
        pass
| 1.359375 | 1 |
qs_backend/qs_backend/workers/worker_fetch_stock.py | Praneesh/quickstocks | 2 | 12771652 | <filename>qs_backend/qs_backend/workers/worker_fetch_stock.py
#! /usr/bin/env python3
# __author__ = "<NAME>"
# __credits__ = []
# __version__ = "0.1.2"
# __maintainer__ = "<NAME>"
# __email__ = "<EMAIL>"
# __status__ = "Prototype"
#
# Step 1: Spawned as a thread, fetches the stock price and pushes into a local message queue
#
# Step 2: Publisher would read this queue, identify the corresponding publish queue, creates
# a Dispatch packet and pushes onto Crossbar Queue, subscribed by Dispatcher
from queue import Queue
from qs_backend.models.stock_model import StockModel
from qs_backend.queues.stock_delivery_queue import StockDeliveryQueue
from qs_backend.logger.default_logger import QSDefaultLogger
from qs_backend.decorators.stock_publish_payload_to_dict import StockPublishPayloadToDict
from yahoo_finance import Share
class StockWorker:
    """Fetch a stock quote from Yahoo Finance, build a StockModel payload,
    and push it onto the shared stock delivery queue.

    Spawned as a thread per stock symbol (see module header, Step 1).
    """

    def __init__(self):
        # Logger shared by all methods of this worker.
        qs_logger_instance = QSDefaultLogger()
        self.qs_logger = qs_logger_instance.get_logger(name=__name__)
        # Converts a StockModel into a plain dict suitable for publishing.
        self.payload_to_publish_dict = StockPublishPayloadToDict()

    def fetch_stock_price(self, stock_unit_key):
        """Fetch the quote for ``stock_unit_key`` and enqueue it for delivery.

        :param stock_unit_key: ticker symbol, e.g. ``'HON'``.
        Returns early (publishing nothing) when the symbol yields no data.
        """
        # Step 1: HTTP call to fetch the stock details.
        share_item = Share(stock_unit_key)
        if share_item.get_open() is None:
            # Unknown ticker or no market data available: nothing to publish.
            return
        # Step 2: map the raw response into our model; between models we
        # exchange native dictionaries rather than JSON objects.
        share_item_dict = share_item.data_set
        st_model = StockModel()
        st_model.stock_unit = stock_unit_key
        st_model.stock_title = share_item_dict['Name']
        # Share price + unit of currency, e.g. "123.45 USD".
        st_model.stock_price = share_item.get_price() + " " + share_item_dict['Currency']
        deviation_price = share_item.get_change()
        st_model.stock_deviation = deviation_price + " (" + share_item_dict['ChangeinPercent'] + ") "  # Ex: '-1.83 (-1.59%)'
        # startswith() instead of indexing [0] avoids an IndexError when the
        # change string is unexpectedly empty.
        if deviation_price.startswith('-'):
            st_model.stock_deviation_status = 'Decline'
        else:
            st_model.stock_deviation_status = 'Incline'
        st_model.stock_equity = share_item.get_stock_exchange()
        st_model.stock_last_update_time = 'At close: ' + share_item_dict['LastTradeDateTimeUTC']
        st_model.stock_52wkrange = share_item.get_year_low() + " - " + share_item.get_year_high()
        st_model.stock_open = share_item.get_open()
        st_model.stock_market_cap = share_item.get_market_cap()
        st_model.stock_prev_close = share_item.get_prev_close()
        st_model.stock_peratio_tte = share_item.get_price_earnings_ratio()
        st_model_to_publish = self.payload_to_publish_dict.get_stock_payload_to_publish(st_model)
        self.push_stock_to_delivery_queue(st_model_to_publish, stock_unit_key)

    def push_stock_to_delivery_queue(self, stock_model, stock_key):
        """Step 3: push the prepared payload onto the common delivery queue.

        :param stock_model: dict payload produced by the publish decorator.
        :param stock_key: ticker symbol (kept for interface compatibility;
            currently unused).
        """
        # queue.Full is the exception raised by put_nowait() on a bounded,
        # full queue. The previous code caught ``Queue.full`` -- a bound
        # method, not an exception class -- which would itself raise a
        # TypeError the moment any exception occurred here. Local import
        # keeps the fix self-contained.
        import queue
        stock_d_queue_instance = StockDeliveryQueue()
        stock_d_queue = stock_d_queue_instance.get_queue()
        self.qs_logger.info(msg='Got instance of Stock Delivery Queue')
        try:
            stock_d_queue.put_nowait(stock_model)
            self.qs_logger.debug(msg='Added Stock Item into Stock Delivery Queue : {}'.format(stock_model))
        except queue.Full as queue_full_exception:
            self.qs_logger.exception(msg='Queue Full Exception {}'.format(queue_full_exception))
            print("Queue Full Exception {}".format(queue_full_exception))
        except Exception as general_exception:
            self.qs_logger.exception(msg="General Exception {}".format(general_exception))
if __name__ == '__main__':
    # Ad-hoc smoke test: fetch a single quote for Honeywell.
    worker = StockWorker()
    worker.fetch_stock_price('HON')
| 2.65625 | 3 |
Source/lbp.py | SaladinoBelisario/PatternsProject | 1 | 12771653 | <gh_stars>1-10
import numpy as np
from scipy.signal import convolve2d
# Eight 3x3 convolution kernels, one per neighbour of the centre cell:
# each kernel pairs a single neighbour position (+1) with the centre (-1),
# so its response compares that neighbour against the centre pixel.
def _neighbour_kernel(row, col):
    """Return a 3x3 int kernel with -1 at the centre and +1 at (row, col)."""
    kernel = np.zeros((3, 3), dtype=int)
    kernel[1, 1] = -1
    kernel[row, col] = 1
    return kernel

f1, f2, f3, f4, f5, f6, f7, f8 = (
    _neighbour_kernel(r, c)
    for (r, c) in ((0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0))
)
def threshold(arr):
    """Binarize *arr*: 1 where the value is >= 0, else 0.

    Vectorized replacement for the original per-element list comprehension;
    also generalizes from strictly 2-D arrays to input of any shape.
    The result has the same shape as *arr* and integer dtype.
    """
    return np.where(arr >= 0, 1, 0)
def calculate_lbp(gray_img):
    """Local Binary Pattern of *gray_img* using a 3x3 neighbourhood.

    Each of the eight kernels compares one neighbour with the centre pixel;
    the eight thresholded responses are packed bitwise into one byte per
    pixel (weights 1, 2, 4, ..., 128) and returned as uint8.
    """
    kernels = (f1, f2, f3, f4, f5, f6, f7, f8)
    weights = (1, 2, 4, 8, 16, 32, 64, 128)
    pattern = 0
    for kernel, weight in zip(kernels, weights):
        response = convolve2d(gray_img, kernel, mode='same')
        pattern = pattern + threshold(response) * weight
    return np.uint8(pattern)
| 2.25 | 2 |
hvac_cli/cmd.py | hvac/hvac-cli | 5 | 12771654 | import sys
import os
from cliff.app import App
from cliff.commandmanager import CommandManager
from hvac_cli.version import __version__
# Fallback Vault address when neither VAULT_ADDR nor VAULT_AGENT_ADDR is set.
DEFAULT_VAULT_ADDR = 'http://127.0.0.1:8200'
class HvacApp(App):
    """cliff application wiring together the hvac-cli command tree."""

    def __init__(self):
        super(HvacApp, self).__init__(
            description="""
            hvac-cli is CLI to Hashicorp Vault with additional features.
            It does not support extensions that are not available
            as Free Software such as namespaces, Sentinel, Policy Overrides
            or Multi-factor Authentication (MFA).
            """,
            version=__version__,
            command_manager=CommandManager('hvac_cli'),
            deferred_help=True,
        )

    def build_option_parser(self, description, version, argparse_kwargs=None):
        """Extend cliff's base option parser with Vault connection options."""
        parser = super().build_option_parser(description, version, argparse_kwargs)
        self.set_parser_arguments(parser)
        return parser

    @staticmethod
    def set_parser_arguments(parser):
        """Add Vault connection/authentication arguments to *parser*.

        Defaults mirror the standard Vault environment variables so the CLI
        behaves like the official `vault` binary.
        """
        parser.add_argument(
            '--dry-run',
            action='store_true',
            help='Show what would be done but do nothing'
        )
        parser.add_argument(
            '--token',
            required=False,
            default=os.getenv('VAULT_TOKEN'),
            help=('Vault token. It will be prompted interactively if unset. '
                  'This can also be specified via the VAULT_TOKEN environment variable.')
        )
        parser.add_argument(
            '--address', '--agent-address',
            default=os.getenv('VAULT_AGENT_ADDR', os.getenv('VAULT_ADDR', DEFAULT_VAULT_ADDR)),
            required=False,
            dest='address',
            help=('Address of the Vault server or the Vault agent. '
                  '--agent-address was introduced with vault 1.1.0. '
                  'This can also be specified via the VAULT_ADDR '
                  'or the VAULT_AGENT_ADDR environment variable. '
                  'If both VAULT_AGENT_ADDR and VAULT_ADDR are in the environment '
                  'VAULT_AGENT_ADDR has precedence')
        )
        parser.add_argument(
            '--tls-skip-verify',
            action='store_true',
            # bool() replaces the redundant `True if ... else False` ternary;
            # semantics are unchanged: any non-empty value of the environment
            # variable (including the string "false") enables the flag.
            default=bool(os.getenv('VAULT_SKIP_VERIFY', False)),
            required=False,
            help=('Disable verification of TLS certificates. Using this option is highly '
                  'discouraged and decreases the security of data transmissions to and from '
                  'the Vault server. The default is false. '
                  'This can also be specified via the VAULT_SKIP_VERIFY environment variable.')
        )
        parser.add_argument(
            '--ca-cert',
            default=os.getenv('VAULT_CACERT'),
            required=False,
            help=('Path on the local disk to a single PEM-encoded CA certificate to verify '
                  'the Vault server\'s SSL certificate. '
                  'This can also be specified via the VAULT_CACERT environment variable. ')
        )
        parser.add_argument(
            '--client-cert',
            default=os.getenv('VAULT_CLIENT_CERT'),
            required=False,
            help=('Path on the local disk to a single PEM-encoded CA certificate to use '
                  'for TLS authentication to the Vault server. If this flag is specified, '
                  '--client-key is also required. '
                  'This can also be specified via the VAULT_CLIENT_CERT environment variable.')
        )
        parser.add_argument(
            '--client-key',
            default=os.getenv('VAULT_CLIENT_KEY'),
            required=False,
            help=('Path on the local disk to a single PEM-encoded private key matching the '
                  'client certificate from -client-cert. '
                  'This can also be specified via the VAULT_CLIENT_KEY environment variable.')
        )
def main(argv=sys.argv[1:]):
    """CLI entry point: run the cliff application with *argv*.

    Note: the default is captured once at import time, matching the
    original behaviour.
    """
    app = HvacApp()
    return app.run(argv)
| 2.453125 | 2 |
python__fundamentals/data_types_and_variables_lab/05.pounds_to_dollars.py | EmilianStoyanov/Projects-in-SoftUni | 1 | 12771655 | pound = int(input())
# Convert British pounds to US dollars at a fixed 1.31 rate,
# printing the result with three decimal places.
dollars = pound * 1.31
print(f"{dollars:.3f}")
| 3.59375 | 4 |
jcourse_api/admin.py | dujiajun/jcourse_api | 7 | 12771656 | <reponame>dujiajun/jcourse_api
import os
from django.contrib import admin
from django.db import IntegrityError
from django.db.models import F
from import_export import resources, fields
from import_export.admin import ImportExportModelAdmin
from import_export.widgets import ForeignKeyWidget, ManyToManyWidget
from jcourse_api.models import *
class CourseResource(resources.ModelResource):
    """Import/export mapping for Course rows (django-import-export)."""
    # Related rows are matched by natural keys (name / tid) instead of PKs.
    department = fields.Field(attribute='department', widget=ForeignKeyWidget(Department, 'name'))
    category = fields.Field(attribute='category', widget=ForeignKeyWidget(Category, 'name'))
    main_teacher = fields.Field(attribute='main_teacher', widget=ForeignKeyWidget(Teacher, 'tid'))
    teacher_group = fields.Field(attribute='teacher_group',
                                 widget=ManyToManyWidget(Teacher, separator=';', field='tid'))
    last_semester = fields.Field(attribute='last_semester', widget=ForeignKeyWidget(Semester, 'name'))
    class Meta:
        model = Course
        import_id_fields = ('code', 'main_teacher')
        exclude = ('id', 'review_count', 'review_avg')
        skip_unchanged = True
        report_skipped = False
        export_order = (
            'code', 'name', 'credit', 'department', 'category', 'main_teacher', 'teacher_group', 'last_semester')
    def save_instance(self, instance, using_transactions=True, dry_run=False):
        # Rows violating unique constraints are deliberately skipped instead
        # of aborting the whole import.
        try:
            super().save_instance(instance, using_transactions, dry_run)
        except IntegrityError:
            pass
@admin.register(Course)
class CourseAdmin(ImportExportModelAdmin):
    """Admin list/import-export configuration for courses."""
    list_display = (
        'id', 'code', 'name', 'credit', 'department', 'category', 'main_teacher', 'review_count', 'review_avg',
        'last_semester')
    list_filter = ('department', 'category', 'credit', 'last_semester')
    search_fields = ('id', 'code', 'name')
    autocomplete_fields = ('main_teacher', 'teacher_group')
    resource_class = CourseResource
    # Denormalized counters are maintained elsewhere; never edited by hand.
    readonly_fields = ('review_count', 'review_avg')
class TeacherResource(resources.ModelResource):
    """Import/export mapping for Teacher rows, keyed by the teacher id (tid)."""
    department = fields.Field(attribute='department', widget=ForeignKeyWidget(Department, 'name'))
    last_semester = fields.Field(attribute='last_semester', widget=ForeignKeyWidget(Semester, 'name'))
    class Meta:
        model = Teacher
        import_id_fields = ('tid',)
        skip_unchanged = True
        report_skipped = False
        exclude = ('id',)
        export_order = ('tid', 'name', 'department', 'title', 'last_semester')
    def save_instance(self, instance, using_transactions=True, dry_run=False):
        # Duplicate imports are deliberately skipped rather than failing.
        try:
            super().save_instance(instance, using_transactions, dry_run)
        except IntegrityError:
            pass
@admin.register(Teacher)
class TeacherAdmin(ImportExportModelAdmin):
    """Admin for teachers; searchable by name and its pinyin transliterations."""
    resource_class = TeacherResource
    list_display = ('tid', 'name', 'department', 'title', 'pinyin', 'abbr_pinyin', 'last_semester')
    list_filter = ('department', 'title', 'last_semester')
    search_fields = ('name', 'pinyin', 'abbr_pinyin')
@admin.register(FormerCode)
class FormerCodeAdmin(ImportExportModelAdmin):
    """Admin for the old-course-code to new-course-code mapping."""
    list_display = ('old_code', 'new_code')
    search_fields = ('old_code', 'new_code')
@admin.register(Review)
class ReviewAdmin(ImportExportModelAdmin):
    """Admin for course reviews."""
    autocomplete_fields = ('user', 'course')
    list_display = (
        'user', 'course', 'created', 'last_modified', 'approve_count', 'disapprove_count', 'comment_validity')
    search_fields = ('user__username', 'course__code')
    readonly_fields = ('approve_count', 'disapprove_count')
    # description is Chinese for "last updated time"; keep the literal as-is.
    # NULL modification times sort last so untouched reviews sink to the bottom.
    @admin.display(ordering=F('modified').desc(nulls_last=True), description='更新时间')
    def last_modified(self, obj):
        return obj.modified
@admin.register(Report)
class ReportAdmin(ImportExportModelAdmin):
    """Admin for user-submitted reports; content fields are read-only."""
    list_display = ('user', 'solved', 'reply_validity', 'comment_validity', 'created')
    search_fields = ('user__username',)
    list_filter = ('solved',)
    readonly_fields = ('user', 'comment', 'created')
@admin.register(Action)
class ActionAdmin(ImportExportModelAdmin):
    """Admin for review approve/disapprove actions (read-only links)."""
    list_display = ('user', 'action', 'review',)
    search_fields = ('user__username', 'review__course__code')
    readonly_fields = ('user', 'review',)
@admin.register(Notice)
class NoticeAdmin(ImportExportModelAdmin):
    """Admin for site-wide notices."""
    list_display = ('title', 'message', 'created', 'available')
class DepartmentResource(resources.ModelResource):
    """Import/export mapping for Department rows, keyed by name."""
    class Meta:
        model = Department
        exclude = ('id', 'count')
        skip_unchanged = True
        report_skipped = False
        import_id_fields = ('name',)
    def save_instance(self, instance, using_transactions=True, dry_run=False):
        # Duplicate department names are deliberately skipped on import.
        try:
            super().save_instance(instance, using_transactions, dry_run)
        except IntegrityError:
            pass
@admin.register(Department)
class DepartmentAdmin(ImportExportModelAdmin):
    """Admin for departments."""
    list_display = ('id', 'name', 'count')
    resource_class = DepartmentResource
@admin.register(Category)
class CategoryAdmin(ImportExportModelAdmin):
    """Admin for course categories."""
    list_display = ('id', 'name', 'count')
@admin.register(Semester)
class NameAdmin(ImportExportModelAdmin):
    """Admin for semesters.

    NOTE(review): the class name is generic; it is registered for the
    Semester model -- consider renaming to SemesterAdmin.
    """
    list_display = ('id', 'name')
@admin.register(UserPoint)
class UserPointAdmin(ImportExportModelAdmin):
    """Admin for user point (reward) records."""
    autocomplete_fields = ('user',)
    list_display = ('user', 'value', 'description', 'time')
    search_fields = ('user__username', 'description')
@admin.register(EnrollCourse)
class EnrollCourseAdmin(ImportExportModelAdmin):
    """Admin for user course-enrollment records (read-only)."""
    list_display = ('user', 'course', 'semester')
    search_fields = ('user__username', 'course__code')
    readonly_fields = ('user', 'course', 'semester')
@admin.register(ApiKey)
class ApiKeyAdmin(admin.ModelAdmin):
    """Admin for API keys; pre-fills new keys with random material."""
    def formfield_for_dbfield(self, db_field, request, **kwargs):
        field = super().formfield_for_dbfield(db_field, request, **kwargs)
        if db_field.name == 'key':
            # Suggest a cryptographically random 32-hex-char default so
            # admins don't invent key material by hand.
            field.initial = os.urandom(16).hex()
        return field
| 2.015625 | 2 |
web-crawler/spider-wechat/config.py | lovelifeming/AI-Studies-Road | 0 | 12771657 | <filename>web-crawler/spider-wechat/config.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/30 13:40
# @Author : zengsm
# @File : config
# Crawl articles from WeChat official accounts.
PROXY_POOL_URL = 'http://127.0.0.1:5000/get'  # local proxy-pool endpoint
KEYWORD ='计算机等级二级' # search keyword (Chinese: "Computer Rank Examination, Level 2")
MONGO_URI = 'localhost'
MONGO_DB = 'data'
MAX_COUNT = 5 | 1.398438 | 1 |
QGrain/__init__.py | yuriok/QGrain | 4 | 12771658 | QGRAIN_VERSION = "0.3.4.2"
import os
# Absolute path of the installed QGrain package directory.
QGRAIN_ROOT_PATH = os.path.dirname(__file__)
from enum import Enum, unique
@unique
class DistributionType(Enum):
    """Supported distribution families; values are their serialized names."""
    Customized = "Customized"
    Nonparametric = "NonParametric"  # note: value casing differs from the member name
    Normal = "Normal"
    Weibull = "Weibull"
    SkewNormal = "SkewNormal"
@unique
class FittingState(Enum):
    """Lifecycle states of a fitting task."""
    NotStarted = 0
    Fitting = 1
    Failed = 2
    # 4 (not 3) looks flag-like, but plain Enum is used -- TODO confirm intent.
    Succeeded = 4
| 2.421875 | 2 |
testing/vcs/test_vcs_verify_levels_extensions_order_setting.py | xylar/cdat | 62 | 12771659 | import vcs
# Regression check: assigning custom boxfill levels must not implicitly
# enable the ext_1/ext_2 extensions, and explicitly re-setting ext_1 must
# not clobber the previously-set levels.
x=vcs.init()
b=x.createboxfill()
b.boxfill_type="custom"
b.levels=[10.0, 21.42857142857143, 32.85714285714286, 44.28571428571429, 55.71428571428572, 67.14285714285715, 78.57142857142858, 90.00000000000001]
# abs(levels[0]) < 1e19 presumably guards against the library's large
# "unset" sentinel value -- TODO confirm against vcs internals.
assert(abs(b.levels[0])<1.e19 and b.ext_1 is False and b.ext_2 is False)
b.ext_1=False
assert(abs(b.levels[0])<1.e19 and b.ext_1 is False and b.ext_2 is False)
| 2.25 | 2 |
src/preprocessor.py | martinoywa/MultiDigitClassification-API | 4 | 12771660 | <reponame>martinoywa/MultiDigitClassification-API
import io
from torchvision import transforms
from PIL import Image
def preprocess(image_bytes):
    """
    Return a transformed image tensor with a leading batch dimension.

    :param image_bytes: raw image bytes (e.g. from a canvas upload).
    :return: float tensor of shape (1, 3, 54, 54), normalized to [-1, 1].
    """
    pipeline = transforms.Compose([
        transforms.Resize([64, 64]),
        transforms.CenterCrop([54, 54]),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    # Decode the web-request bytes and force three RGB channels.
    decoded = Image.open(io.BytesIO(image_bytes)).convert('RGB')
    # unsqueeze(0) turns the single image into a batch of one.
    return pipeline(decoded).unsqueeze(0)
| 3.28125 | 3 |
z.uncategorized/softeer581.py | kimminki10/algorithms2 | 0 | 12771661 | """
https://softeer.ai/practice/info.do?eventIdx=1&psProblemId=581
택배 마스터 광우
큐, 순열
"""
import sys
# Competitive-programming speed trick: rebinding input() to
# sys.stdin.readline; note this shadows the builtin for the whole module.
input = sys.stdin.readline
import itertools
# N: number of belts, M: capacity limit per trip, K: number of loads
# (semantics inferred from the linked problem statement -- TODO confirm).
N, M, K = map(int, input().split())
# Per-belt weights; NOTE(review): this is a lazy map object, consumed
# exactly once by itertools.permutations() in solve().
arr = map(int, input().split())
# Best (minimum) answer found so far; large sentinel start value.
result = 987654321
def dojob(k, p, m):
    """Simulate loading with belt order *p*, updating the global minimum.

    :param k: remaining number of loads to perform.
    :param p: a permutation of the belt weights.
    :param m: capacity limit for a single accumulated load.
    Prunes early (plain return) once the running cost exceeds the best
    known result.
    """
    global result
    part_sum = 0
    s = 0
    i = 0
    while True:
        # Branch-and-bound pruning against the best answer so far.
        if part_sum > result: return
        if k == 0: break
        if s + p[i] <= m:
            # Current item still fits into the accumulated load.
            s += p[i]
        else:
            # Capacity exceeded: flush the load, start a new one with p[i].
            part_sum += s
            s = p[i]
            k -= 1
        # Belts are visited cyclically.
        i = (i + 1) % N
    if part_sum < result:
        result = part_sum
def solve():
    """Try every visiting order of the belts and print the best value found."""
    for ordering in itertools.permutations(arr):
        dojob(K, ordering, M)
    print(result)
solve() | 2.90625 | 3 |
Utilities/learn_weights.py | tesslerc/H-DRLN | 31 | 12771662 | <filename>Utilities/learn_weights.py<gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import h5py
import numpy as np
import math
import tensorflow as tf
# Parsed command-line arguments; assigned in the __main__ block below.
FLAGS = None
def main(_):
    """Train, per skill, a small Q-network on recorded DQN activations.

    Reads activations/actions/termination arrays from the HDF5 file given
    by FLAGS.file and writes the learned layer weights to skillWeightsQ.h5.
    Uses TF1-style graph/session APIs throughout.
    """
    # Network hyper-parameters. NOTE(review): hiddenWidth2 and the second
    # hidden layer built below are dead code on the active path (y is wired
    # to act_hidden1, not act_hidden2).
    hiddenWidth1 = 100
    hiddenWidth2 = 64
    outputWidth = 5
    weightInit = -1
    batchSize = 4
    gamma = 0.7
    dataOut = h5py.File('skillWeightsQ.h5', 'w') #_2Layer.h5', 'w')
    # Import data
    print('Loading data...')
    data = h5py.File(FLAGS.file, 'r')
    numSkills = data.get('numberSkills')
    print('Number of skills is ' + str(numSkills[()]))
    dataOut.create_dataset('hiddenWidth', data=hiddenWidth1)
    dataOut.create_dataset('numberSkills', data=numSkills)
    for skill in range(numSkills[()]):
        # Per-skill dataset; actions are shifted to be 0-based.
        activations = np.array(data.get('activations_' + str(skill)))
        actions = (np.array(data.get('actions_' + str(skill))) - 1)
        termination = np.array(data.get('termination_' + str(skill)))
        print('Creating model...')
        # Create the model
        step = tf.Variable(0, trainable=False) # cant attach non trainable variable to gpu
        with tf.device('/gpu:1'):
            x = tf.placeholder(tf.float32, [None, 512, ])
            # Hidden Layer1
            W_hidden1 = tf.Variable(tf.truncated_normal([512, hiddenWidth1], stddev=0.1))
            b_hidden1 = tf.Variable(tf.constant(0.1, shape=[hiddenWidth1]))
            y_hidden1 = tf.add(tf.matmul(x, W_hidden1), b_hidden1)
            act_hidden1 = tf.nn.relu(y_hidden1)
            # Hidden Layer2
            W_hidden2 = tf.Variable(tf.random_uniform([hiddenWidth1, hiddenWidth2], weightInit, 1))
            b_hidden2 = tf.Variable(tf.random_uniform([hiddenWidth2], weightInit, 1))
            y_hidden2 = tf.add(tf.matmul(act_hidden1, W_hidden2), b_hidden2)
            act_hidden2 = tf.nn.relu(y_hidden2)
            # Output Layer
            W_output = tf.Variable(tf.truncated_normal([hiddenWidth1, outputWidth], stddev=0.1))
            #W_output = tf.Variable(tf.truncated_normal([hiddenWidth2, outputWidth], stddev=0.1))
            b_output = tf.Variable(tf.constant(0.1, shape=[outputWidth]))
            y = tf.add(tf.matmul(act_hidden1, W_output), b_output)
            #y = tf.add(tf.matmul(act_hidden2, W_output), b_output)
            predict = tf.argmax(y, 1)
            '''
            # Linear only
            W = tf.Variable(tf.random_uniform([512, outputWidth], weightInit, 0.01))
            b = tf.Variable(tf.random_uniform([outputWidth], weightInit, 0.01))
            y = tf.add(tf.matmul(x, W), b)
            predict = tf.argmax(y, 1)
            '''
            # Squared-error Q-learning loss with an exponentially decaying
            # learning rate driven by the global step.
            nextQ = tf.placeholder(shape=[None, outputWidth, ], dtype=tf.float32)
            loss = tf.reduce_sum(tf.square(nextQ - y))
            #loss = tf.nn.softmax_cross_entropy_with_logits(y, nextQ)
            rate = tf.train.exponential_decay(0.0005, step, 250, 0.9999)
            trainer = tf.train.AdamOptimizer(rate) #learning_rate=0.000001) # GradientDescentOptimizer(learning_rate=0.0001)
            updateModel = trainer.minimize(loss, global_step=step)
        # train_step = tf.train.AdamOptimizer().minimize(cross_entropy)
        # train_step = tf.train.RMSPropOptimizer(0.1).minimize(cross_entropy)
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(cross_entropy)
        sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
        #tf.global_variables_initializer().run()
        tf.initialize_all_variables().run()
        #sess.run(tf.initialize_all_variables())
        # Train
        # NOTE(review): maxQ never changes (the update below is commented
        # out), so the later division by maxQ is a no-op.
        maxQ = 1
        iteration = 0
        print('Training...')
        # NOTE(review): `_` serves as the loop counter here and is clobbered
        # by the `_, loss_val = sess.run(...)` assignment at the loop's end.
        for _ in range(20000000):
            # Periodic progress report on an 80/20 train/test split.
            if (_ % 1000000 == 0 and _ > 0): # and False):
                testPredictions = sess.run(predict, feed_dict={x: activations[int(math.ceil(activations.shape[0] * 0.8)) + 1:activations.shape[0],:]})
                trainPredictions = sess.run(predict, feed_dict={x: activations[0:int(math.ceil(activations.shape[0] * 0.8)),:]})
                print('Done ' + str(_) + ' iterations. testing error is: ' + str(100 * np.sum(np.sign(np.absolute(testPredictions - actions[int(math.ceil(activations.shape[0] * 0.8)) + 1:activations.shape[0]]))) * 1.0 / (activations.shape[0] - int(math.ceil(activations.shape[0] * 0.8)) + 1)) + '%, training error is: ' + str(100 * np.sum(np.sign(np.absolute(trainPredictions - actions[0:int(math.ceil(activations.shape[0] * 0.8))]))) * 1.0 / (int(math.ceil(activations.shape[0] * 0.8)))) + '%')
                print('Loss: ' + str(loss_val) + ', Skill#: ' + str(skill))
            # Random mini-batch drawn from the training (first 80%) portion.
            index = np.random.randint(int(math.ceil(activations.shape[0] * 0.8)), size=batchSize)
            '''
            iteration = iteration + 1
            iteration = iteration % int(math.ceil(activations.shape[0] * 0.8))
            index = np.array([iteration])
            '''
            allQ = sess.run(y,feed_dict={x: activations[index, :]})
            Q1 = sess.run(y,feed_dict={x: activations[index + 1, :]})
            targetQ = np.ones(allQ.shape) * -1
            #targetQ = allQ
            for i in range(index.shape[0]):
                # Bootstrap the target from the next state unless terminal.
                if termination[index[i]] == 1:
                    Q = 0
                else:
                    Q = np.max(Q1[i, :]) * gamma
                # maxQ = max(maxQ, abs(Q))
                targetQ[i, :] = targetQ[i, :] + Q - gamma * gamma
                targetQ[i, int(actions[index[i]])] = targetQ[i, int(actions[index[i]])] + gamma * gamma
            targetQ = targetQ * 1.0 / maxQ
            '''
            targetQ = np.zeros(allQ.shape)
            for i in range(index.shape[0]):
                targetQ[i, int(actions[index[i]])] = 1
            '''
            _, loss_val = sess.run([updateModel, loss], feed_dict={x: activations[index, :], nextQ: targetQ})
        # Test trained model
        print('Testing model on ' + str(len(actions[int(math.ceil(activations.shape[0] * 0.8)) + 1:activations.shape[0]])) + ' samples...')
        prediction = tf.argmax(y,1)
        predictions = prediction.eval(feed_dict={x: activations[int(math.ceil(activations.shape[0] * 0.8)) + 1:activations.shape[0],:]}, session=sess)
        # print(predictions)
        # print(actions[int(math.ceil(np.array(activations).shape[0] * 0.8)) + 1:np.array(activations).shape[0]])
        print('Testing error:')
        print(100 * np.sum(np.sign(np.absolute(predictions - actions[int(math.ceil(activations.shape[0] * 0.8)) + 1:activations.shape[0]]))) * 1.0 / (activations.shape[0] - int(math.ceil(activations.shape[0] * 0.8)) + 1))
        print('Training error:')
        predictions = prediction.eval(feed_dict={x: activations[0:int(math.ceil(activations.shape[0] * 0.8)),:]}, session=sess)
        print(100 * np.sum(np.sign(np.absolute(predictions - actions[0:int(math.ceil(activations.shape[0] * 0.8))]))) * 1.0 / (int(math.ceil(activations.shape[0] * 0.8))))
        # Persist the learned weights for this skill.
        dataOut.create_dataset('W_hidden_' + str(skill), data=sess.run(W_hidden1))
        dataOut.create_dataset('b_hidden_' + str(skill), data=sess.run(b_hidden1))
        dataOut.create_dataset('W_output_' + str(skill), data=sess.run(W_output))
        dataOut.create_dataset('b_output_' + str(skill), data=sess.run(b_output))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-file', type=str, required=True,
                        help='Name of Skill extraction file.')
    FLAGS, unparsed = parser.parse_known_args()
    # tf.app.run parses any remaining flags and invokes main(_).
    tf.app.run(main=main)
| 2.28125 | 2 |
corehq/apps/sms/management/commands/change_phonenumber_backend.py | dimagilg/commcare-hq | 471 | 12771663 | <filename>corehq/apps/sms/management/commands/change_phonenumber_backend.py<gh_stars>100-1000
import os
import sys
from collections import defaultdict
from django.core.management.base import BaseCommand
import csv
from corehq.util.log import with_progress_bar
from ...models import PhoneNumber, SQLMobileBackend
from ...util import clean_phone_number
class Command(BaseCommand):
    """Management command: move PhoneNumbers from one SMS backend to another."""
    help = "Reassign phone numbers with old backend id to new backend id"
    def add_arguments(self, parser):
        parser.add_argument("old_backend", help="Old backend ID")
        parser.add_argument("--new-backend", help=(
            "New backend ID. Dry-run if this option is absent. Use 'None' "
            "to clear the old backend without specifying a new backend; "
            "the phone number will use the domain/system default backend."
        ))
        parser.add_argument("--domain", help="Limit to phone numbers in domain.")
        parser.add_argument("--dump-csv",
            help="Dump phone numbers to CSV file path "
                 "(the path is the value given for this option).")
    def handle(self, old_backend, new_backend=None, domain=None, **options):
        query = PhoneNumber.objects.filter(backend_id=old_backend)
        if domain is not None:
            query = query.filter(domain=domain)
        # Optional CSV dump happens before any mutation, so it can be used
        # as a backup of the pre-reassignment state.
        if options["dump_csv"]:
            dump_csv(query, options["dump_csv"])
        print_counts_by_default_backend(query)
        print("Total assigned to {}: {}".format(old_backend, len(query)))
        # Without --new-backend this is a dry run: report only, no update.
        if new_backend:
            reassign(query, new_backend)
def dump_csv(query, path):
    """Write the phone numbers in *query* to a CSV file at *path*.

    *path* may start with '~'; it is expanded before writing. Each row
    contains the number's domain, couch id and phone number.
    """
    path = os.path.expanduser(path)
    print("dumping to CSV: {}".format(path))
    # newline='' is required for csv.writer: without it the csv module's
    # own '\r\n' row terminator is re-translated by the text layer, which
    # produces blank lines between rows on Windows (see csv module docs).
    with open(path, "w", newline="", encoding="utf-8") as output:
        csvfile = csv.writer(output)
        csvfile.writerow(["domain", "couch_id", "phonenumber"])
        for phone in query:
            csvfile.writerow([
                phone.domain,
                phone.couch_id,
                phone.phone_number,
            ])
def print_counts_by_default_backend(query):
    """Print how many numbers in *query* would fall back to each default backend.

    Useful for predicting which backends will serve the numbers once the
    explicit assignment is cleared.
    """
    tally = defaultdict(int)
    total = len(query)
    for number in with_progress_bar(query, total, oneline=True):
        backend = SQLMobileBackend.load_default_by_phone_and_domain(
            SQLMobileBackend.SMS,
            clean_phone_number(number.phone_number),
            domain=number.domain
        )
        tally[backend.name] += 1
    print("Counts by default backend")
    for backend_name, count in sorted(tally.items()):
        print("{:<25}{:>4}".format(backend_name, count))
def reassign(query, new_backend):
    """Update every number in *query* to *new_backend* after confirmation.

    The literal string 'None' clears the assignment (number falls back to
    the domain/system default). Exits with status 1 if the user declines.
    """
    target = None if new_backend == "None" else new_backend
    if confirm("Reassign to {}".format(target)):
        count = query.update(backend_id=target)
        print("{} phone numbers updated".format(count))
    else:
        print("abort")
        sys.exit(1)
def confirm(msg):
    """Prompt with *msg*; return True only on an explicit 'y'/'Y' answer."""
    answer = input(msg + " (y/N) ")
    return answer.lower() == 'y'
| 2.265625 | 2 |
airflow/contrib/operators/pubsub_operator.py | diggzhang/airflow-dingit | 6 | 12771664 | <filename>airflow/contrib/operators/pubsub_operator.py<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.gcp_pubsub_hook import PubSubHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class PubSubTopicCreateOperator(BaseOperator):
    """Create a PubSub topic.
    By default, if the topic already exists, this operator will
    not cause the DAG to fail.
    ```
    with DAG('successful DAG') as dag:
        (
            dag
            >> PubSubTopicCreateOperator(topic='my_new_topic')
            >> PubSubTopicCreateOperator(topic='my_new_topic')
        )
    ```
    The operator can be configured to fail if the topic already exists.
    ```
    with DAG('failing DAG') as dag:
        (
            dag
            >> PubSubTopicCreateOperator(topic='my_new_topic')
            >> PubSubTopicCreateOperator(topic='my_new_topic',
                                         fail_if_exists=True)
        )
    ```
    Both ``project`` and ``topic`` are templated so you can use
    variables in them.
    """
    template_fields = ['project', 'topic']
    ui_color = '#0273d4'
    @apply_defaults
    def __init__(
            self,
            project,
            topic,
            fail_if_exists=False,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param project: the GCP project name or ID in which to work
            (templated)
        :type project: string
        :param topic: the topic to create. Do not include the
            full topic path. In other words, instead of
            ``projects/{project}/topics/{topic}``, provide only
            ``{topic}``. (templated)
        :type topic: string
        :param fail_if_exists: if True, the task fails when the topic
            already exists; if False (default), an existing topic is
            tolerated.
        :type fail_if_exists: bool
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubTopicCreateOperator, self).__init__(*args, **kwargs)
        self.project = project
        self.topic = topic
        self.fail_if_exists = fail_if_exists
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
    def execute(self, context):
        # Delegate topic creation to the PubSub hook; existence handling is
        # controlled by fail_if_exists.
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        hook.create_topic(self.project, self.topic,
                          fail_if_exists=self.fail_if_exists)
class PubSubPublishOperator(BaseOperator):
    """Publish messages to a PubSub topic.
    Each Task publishes all provided messages to the same topic
    in a single GCP project. If the topic does not exist, this
    task will fail.
    ```
    from base64 import b64encode as b64e
    m1 = {'data': b64e('Hello, World!'),
          'attributes': {'type': 'greeting'}
         }
    m2 = {'data': b64e('Knock, knock')}
    m3 = {'attributes': {'foo': ''}}
    t1 = PubSubPublishOperator(
        topic='my_topic',
        messages=[m1, m2, m3],
        create_topic=True,
        dag=dag)
    ```
    Both ``project`` and ``topic`` are templated so you can use
    variables in them.
    """
    template_fields = ['project', 'topic', 'messages']
    ui_color = '#0273d4'
    @apply_defaults
    def __init__(
            self,
            project,
            topic,
            messages,
            gcp_conn_id='google_cloud_default',
            delegate_to=None,
            *args,
            **kwargs):
        """
        :param project: the GCP project name or ID in which to work
            (templated)
        :type project: string
        :param topic: the topic to which to publish. Do not include the
            full topic path. In other words, instead of
            ``projects/{project}/topics/{topic}``, provide only
            ``{topic}``. (templated)
        :type topic: string
        :param messages: a list of messages to be published to the
            topic. Each message is a dict with one or more of the
            following keys-value mappings:
            * 'data': a base64-encoded string
            * 'attributes': {'key1': 'value1', ...}
            Each message must contain at least a non-empty 'data' value
            or an attribute dict with at least one key. See
            https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
            (templated)
        :type messages: list
        :param gcp_conn_id: The connection ID to use connecting to
            Google Cloud Platform.
        :type gcp_conn_id: string
        :param delegate_to: The account to impersonate, if any.
            For this to work, the service account making the request
            must have domain-wide delegation enabled.
        :type delegate_to: string
        """
        super(PubSubPublishOperator, self).__init__(*args, **kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.project = project
        self.topic = topic
        self.messages = messages
    def execute(self, context):
        # All messages are published in a single hook call; message
        # validation is left to the hook / PubSub API.
        hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
                          delegate_to=self.delegate_to)
        hook.publish(self.project, self.topic, self.messages)
| 2.421875 | 2 |
pastebin/paste.py | 11mariom/pastebin | 1 | 12771665 | # -*- encoding: utf-8 -*-
#from flask_sqlalchemy import SQLAlchemy
#db = SQLAlchemy()
from sqlalchemy import Column, Integer, Text
from database import db
from database import Base
class Paste(Base):
    """ORM model for a stored paste (table ``paste``)."""
    __tablename__ = 'paste'

    # Auto-incrementing surrogate key.
    id = Column(Integer, primary_key=True)
    # The pasted text itself.
    data = Column(Text())

    def __init__(self):
        """Create an empty paste; content is stored later via put()."""

    def __repr__(self):
        return '<Paste %r>' % (self.id)

    def get(self, id):
        """Return the Paste with primary key *id*, or None if absent."""
        return Paste.query.filter_by(id=id).first()

    def get_id(self):
        """Return this paste's primary key."""
        return self.id

    def get_data(self):
        """Return this paste's stored text."""
        return self.data

    def put(self, data):
        """Store *data* on this paste, persist it, and return the new id.

        Note: commits the session as a side effect.
        """
        self.data = data
        db.add(self)
        db.commit()
        return self.id
| 2.984375 | 3 |
dark/codonDistance.py | UdoGi/dark-matter | 10 | 12771666 | <reponame>UdoGi/dark-matter<filename>dark/codonDistance.py<gh_stars>1-10
from collections import defaultdict
def findDistance(co1, co2):
    """
    Find the distance between two codons.

    @param co1: A C{str} of length three.
    @param co2: A C{str} of length three.
    @return: An C{int} distance in [0, 3]: the number of positions at
        which the codons differ (their Hamming distance).
    """
    # zip-based comparison is equivalent for 3-character codons and, unlike
    # direct indexing, does not raise IndexError on shorter input.
    return sum(a != b for a, b in zip(co1, co2))
def codonInformation(codons1, codons2):
    """
    Group every codon pair from two lists by their mutual distance.

    @param codons1: a C{list} of codons.
    @param codons2: a C{list} of codons.
    @return: a dict whose keys are C{int} distances and whose values are lists
        of pairs of codons.
    """
    byDistance = defaultdict(list)
    for codon1 in codons1:
        for codon2 in codons2:
            byDistance[findDistance(codon1, codon2)].append([codon1, codon2])
    return byDistance
| 2.984375 | 3 |
external/plex/dist/tests/test10.py | almartin82/bayeslite | 964 | 12771667 | <filename>external/plex/dist/tests/test10.py
# Test traditional regular expression syntax.
import Test
from Plex.Traditional import re
from Plex.Errors import PlexError
from Plex import Seq, AnyBut
def test_err(s):
    """Print the machine compiled from pattern *s*, or the PlexError it raises."""
    try:
        print re(s)
    except PlexError, e:
        print e
# Literals and character classes (including '-' and ']' edge positions).
print re("")
print re("a")
print re("[a]")
print re("[ab]")
print re("[abc]")
print re("[a-c]")
print re("[a-cd]")
print re("[a-cg-i]")
print re("[^a]")
print re("[^a-cg-i]")
print re("[-]")
print re("[-abc]")
print re("[abc-]")
print re("[]]")
print re("[]-]")
print re("[^-]")
print re("[^-abc]")
print re("[^abc-]")
print re("[^]]")
print re("[^]-]")
# Repetition operators, including stacked quantifiers.
print re("a*")
print re("a+")
print re("a?")
print re("a*+?")
# Sequencing, alternation, grouping and escaped metacharacters.
print re("ab")
print re("a|b")
print re("abcde")
print re("a|b|c|d|e")
print re("abc|def|ghi")
print re("abc(def|ghi)")
print re("ab\(c\[de")
print re("^abc$")
# '.' should be equivalent to "any character except newline".
print str(re(".")) == str(Seq(AnyBut('\n')))
# Malformed patterns must raise PlexError, not crash.
test_err("abc(de")
test_err("abc[de")
test_err("abc)de")
| 2.6875 | 3 |
lomond/events.py | johnashu/dataplicity-lomond | 225 | 12771668 | from __future__ import unicode_literals
import json
import time
class Event(object):
    """Base class for websocket events; records its creation time."""

    __slots__ = ['received_time']

    def __init__(self):
        # Timestamp the moment the event object was created.
        self.received_time = time.time()

    def __repr__(self):
        return "{}()".format(self.__class__.__name__)

    @classmethod
    def _summarize_bytes(cls, data, max_len=24):
        """Abbreviate a long byte string for log-friendly reprs."""
        if len(data) <= max_len:
            return repr(data)
        overflow = len(data) - max_len
        return "{!r} + {} bytes".format(data[:max_len], overflow)

    @classmethod
    def _summarize_text(cls, text, max_len=24):
        """Abbreviate a long text string for log-friendly reprs."""
        if len(text) <= max_len:
            return repr(text)
        overflow = len(text) - max_len
        return "{!r} + {} chars".format(text[:max_len], overflow)
class Poll(Event):
    """A generated poll event."""
    # Event name used for dispatch (e.g. ``on_poll`` handler lookup).
    name = 'poll'
class Connecting(Event):
    """
    Generated just before Lomond attempts to open a websocket
    connection.

    :param url: The websocket URL the websocket is connecting to.
    """

    __slots__ = ['url']
    name = 'connecting'

    def __init__(self, url):
        super(Connecting, self).__init__()
        self.url = url

    def __repr__(self):
        class_name = type(self).__name__
        return "{}(url='{}')".format(class_name, self.url)
class ConnectFail(Event):
    """
    Generated when Lomond could not establish a connection to the
    websocket server.

    :param str reason: A short description of the failure.

    """

    __slots__ = ['reason']
    name = 'connect_fail'

    def __init__(self, reason):
        super(ConnectFail, self).__init__()
        self.reason = reason

    def __repr__(self):
        return "{}(reason='{}')".format(type(self).__name__, self.reason)
class Connected(Event):
    """Generated once a socket connection to the server is established,
    before the websocket upgrade has been negotiated.

    :param str url: The websocket URL connected to.
    :param str proxy: The proxy URL connected to (or None).

    """

    __slots__ = ['url', 'proxy']
    name = 'connected'

    def __init__(self, url, proxy=None):
        super(Connected, self).__init__()
        self.url = url
        self.proxy = proxy

    def __repr__(self):
        class_name = self.__class__.__name__
        if self.proxy is None:
            return "{}(url='{}')".format(class_name, self.url)
        return "{}(url='{}', proxy='{}')".format(
            class_name, self.url, self.proxy
        )
class Rejected(Event):
    """Server rejected WS connection."""

    __slots__ = ['response', 'reason']
    name = 'rejected'

    def __init__(self, response, reason):
        """
        Generated when the TCP connection succeeded but the server
        refused the websocket upgrade.

        :param response: The response returned by the server.
        :param str reason: A description of why the connection was
            rejected.

        """
        super(Rejected, self).__init__()
        self.response = response
        self.reason = reason

    def __repr__(self):
        return "{}(response={!r}, reason='{}')".format(
            type(self).__name__, self.response, self.reason
        )
class Ready(Event):
    """Generated when the websocket upgrade handshake has completed
    successfully and the connection is ready for messages.

    :param response: A :class:`~lomond.response.Response` object.
    :param str protocol: A websocket protocol or ``None`` if no protocol
        was supplied.
    :param set extensions: A set of negotiated websocket extensions.
        Currently only the ``'permessage-deflate'`` extension is supported.

    """

    __slots__ = ['response', 'protocol', 'extensions']
    name = 'ready'

    def __init__(self, response, protocol, extensions):
        super(Ready, self).__init__()
        self.response = response
        self.protocol = protocol
        self.extensions = extensions

    def __repr__(self):
        return '{}(response={!r}, protocol={!r}, extensions={!r})'.format(
            type(self).__name__,
            self.response,
            self.protocol,
            self.extensions
        )
class ProtocolError(Event):
    """Generated when the server deviates from the websocket protocol.

    :param str error: A description of the error.
    :param bool critical: ``True`` means Lomond will disconnect
        immediately; ``False`` means it will first send a close message.

    """

    __slots__ = ['error', 'critical']
    name = 'protocol_error'

    def __init__(self, error, critical):
        super(ProtocolError, self).__init__()
        self.error = error
        self.critical = critical

    def __repr__(self):
        return "{}(error='{}', critical={!r})".format(
            type(self).__name__, self.error, self.critical
        )
class Unresponsive(Event):
    """The server has not responded to pings within `ping_timeout`
    seconds.
    Will be followed by a :class:`~lomond.events.Disconnected` event.
    """
    # Event name used for dispatch.
    name = 'unresponsive'
class Disconnected(Event):
    """Generated when the websocket connection has been dropped.

    :param str reason: A description of why the websocket was closed.
    :param bool graceful: ``True`` if the connection closed cleanly,
        ``False`` if it was dropped due to a socket failure or other
        problem.

    """

    __slots__ = ['graceful', 'reason']
    name = 'disconnected'

    def __init__(self, reason='closed', graceful=False):
        super(Disconnected, self).__init__()
        self.reason = reason
        self.graceful = graceful

    def __repr__(self):
        return "{}(reason='{}', graceful={!r})".format(
            type(self).__name__, self.reason, self.graceful
        )
class Closed(Event):
    """Generated when the websocket was closed; no further packets may
    be sent. Followed by a :class:`~lomond.events.Disconnected` event.

    :param code: The close code returned from the server.
    :param str reason: An optional description of why the websocket was
        closed, as returned from the server.

    """

    __slots__ = ['code', 'reason']
    name = 'closed'

    def __init__(self, code, reason):
        super(Closed, self).__init__()
        self.code = code
        self.reason = reason

    def __repr__(self):
        return '{}(code={!r}, reason={!r})'.format(
            type(self).__name__,
            self.code,
            self.reason,
        )
class Closing(Event):
    """Generated when the server starts closing the connection.

    No more messages will arrive from the server, but messages may still
    be sent while handling this event. A
    :class:`~lomond.events.Disconnected` event should follow shortly.

    :param code: The close code returned from the server.
    :param str reason: An optional description of why the websocket was
        closed, as returned from the server.

    """

    __slots__ = ['code', 'reason']
    name = 'closing'

    def __init__(self, code, reason):
        super(Closing, self).__init__()
        self.code = code
        self.reason = reason

    def __repr__(self):
        return '{}(code={!r}, reason={!r})'.format(
            type(self).__name__,
            self.code,
            self.reason,
        )
class UnknownMessage(Event):
    """
    An application message arrived carrying an opcode Lomond does not
    recognise.
    """

    __slots__ = ['message']
    name = 'unknown'

    def __init__(self, message):
        super(UnknownMessage, self).__init__()
        self.message = message
class Ping(Event):
    """Generated when a ping packet arrives from the server.

    :param bytes data: Ping payload data.

    """

    __slots__ = ['data']
    name = 'ping'

    def __init__(self, data):
        super(Ping, self).__init__()
        self.data = data

    def __repr__(self):
        return "{}(data={!r})".format(type(self).__name__, self.data)
class Pong(Event):
    """Generated when a pong packet arrives from the server.

    :param bytes data: The pong payload data.

    """

    __slots__ = ['data']
    name = 'pong'

    def __init__(self, data):
        super(Pong, self).__init__()
        self.data = data

    def __repr__(self):
        return "{}(data={!r})".format(type(self).__name__, self.data)
class Text(Event):
    """Generated when Lomond receives a text message from the server.

    :param str text: The text payload.

    """
    __slots__ = ['text', '_json']
    name = 'text'

    # Sentinel marking "not yet decoded". ``None`` cannot be used as the
    # cache marker because ``json.loads`` legitimately returns ``None``
    # for the document ``"null"`` — with the old ``is None`` check that
    # value was re-parsed on every access.
    _UNSET = object()

    def __init__(self, text):
        self.text = text
        self._json = self._UNSET
        super(Text, self).__init__()

    @property
    def json(self):
        """Text decoded as JSON.

        Calls ``json.loads`` to decode the ``text`` attribute, and may
        throw the same exceptions if the text is not valid json. The
        decoded value is cached after the first successful access.
        """
        if self._json is self._UNSET:
            self._json = json.loads(self.text)
        return self._json

    def __repr__(self):
        return "{}(text={})".format(
            self.__class__.__name__,
            self._summarize_text(self.text)
        )
class Binary(Event):
    """Generated when Lomond receives a binary message from the server.

    :param bytes data: The binary payload.

    """

    __slots__ = ['data']
    name = 'binary'

    def __init__(self, data):
        super(Binary, self).__init__()
        self.data = data

    def __repr__(self):
        return "{}(data={})".format(
            type(self).__name__, self._summarize_bytes(self.data)
        )
class BackOff(Event):
    """Generated when a persistent connection waits before re-attempting
    to connect.

    :param float delay: The delay (in seconds) before the next
        connection attempt.

    """

    __slots__ = ['delay']
    name = 'back_off'

    def __init__(self, delay):
        super(BackOff, self).__init__()
        self.delay = delay

    def __repr__(self):
        return "{}(delay={:0.1f})".format(type(self).__name__, self.delay)
| 3.015625 | 3 |
twittoff/twitter.py | rgiuffre90/twittoff | 0 | 12771669 | """Retrive Tweets, word embeddings, and populate DB"""
import tweepy
import spacy
from .models import DB, Tweet, User
from os import getenv
# Twitter API credentials are read from the environment; never hard-code them.
TWITTER_API_KEY = getenv('TWITTER_API_KEY')
TWITTER_API_KEY_SECRET = getenv('TWITTER_API_KEY_SECRET')
# Authenticated tweepy client shared by the module.
TWITTER_AUTH = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)
TWITTER = tweepy.API(TWITTER_AUTH)
# spaCy language model used to embed tweet text (loaded once at import time).
nlp = spacy.load('my_model')
def vectorize_tweet(tweet_text):
    """Return the spaCy document vector for *tweet_text* using the loaded model."""
    return nlp(tweet_text).vector
def add_or_update_user(username):
    """Add or refresh a Twitter user and their recent tweets in the DB.

    Fetches the user's profile and up to 200 original tweets, stores a
    word-embedding vector for each tweet, and commits on success.

    :param username: Twitter screen name to fetch.
    :raises Exception: re-raises any error from the Twitter API or DB layer
        after logging it; the session is only committed when no error occurred.
    """
    try:
        twitter_user = TWITTER.get_user(username)
        db_user = (User.query.get(twitter_user.id)) or User(id=twitter_user.id, name=username)
        DB.session.add(db_user)
        tweets = twitter_user.timeline(
            count=200, exclude_replies=True,
            include_rts=False, tweet_mode='extended'
        )
        if tweets:
            db_user.newest_tweet_id = tweets[0].id
        for tweet in tweets:
            # BUG FIX: the original assigned the result to a local named
            # ``vectorize_tweet``, shadowing the module-level function and
            # raising UnboundLocalError on the first iteration.
            tweet_vector = vectorize_tweet(tweet.full_text)
            db_tweet = Tweet(
                id=tweet.id, text=tweet.full_text,
                vect=tweet_vector
            )
            db_user.tweets.append(db_tweet)
            DB.session.add(db_tweet)
    except Exception as e:
        print('Error Processing: {}: {}'.format(username, e))
        raise e
    else:
        # Commit only when every tweet was stored without error.
        DB.session.commit()
def insert_example_users():
    """Seed the database with a couple of example Twitter users."""
    for example_name in ('elonmusk', 'jackblack'):
        add_or_update_user(example_name)
tests/checks/check.py | ptrahrens/ReproBLAS | 6 | 12771670 | import os
from tests.checks import checks
from tests.harness import harness
from scripts import terminal
# Directory containing this check script (used to locate resources).
check_dir = os.path.dirname(os.path.abspath(__file__))
check_suite = checks.CheckSuite()
# Default fold counts and strides used across the parameter sweeps below.
folds = [3]
inf_folds = [3]
incs = [1]
# Single-precision parameters — these mirror C <float.h> values for IEEE-754
# binary32 (FLT_MAX_EXP=128, FLT_MANT_DIG=24); BIN_WIDTH/BIG/SMALL are
# ReproBLAS binning parameters.
FLT_BIN_WIDTH=13
FLT_MAX_EXP=128
FLT_BIG_EXP=13
FLT_SMALL_EXP=-12
FLT_MIN_EXP=-125
FLT_MANT_DIG=24
# FLT_ONES: value with every mantissa bit set (sum of 2^0 .. 2^-23).
FLT_ONES = 0
for i in range(FLT_MANT_DIG):
  FLT_ONES += 2.0 ** -i
# Double-precision equivalents (IEEE-754 binary64: DBL_MAX_EXP=1024,
# DBL_MANT_DIG=53).
DBL_BIN_WIDTH=41
DBL_MAX_EXP=1024
DBL_BIG_EXP=27
DBL_SMALL_EXP=-27
DBL_MIN_EXP=-1021
DBL_MANT_DIG=53
# DBL_ONES: value with every mantissa bit set (sum of 2^0 .. 2^-52).
DBL_ONES = 0
for i in range(DBL_MANT_DIG):
  DBL_ONES += 2.0 ** -i
# Validate low-level helpers: scaling, unit-in-first-place, bin index
# computation, and absolute-max selection, in both precisions.
check_suite.add_checks([checks.ValidateInternalDSCALETest(),\
                        checks.ValidateInternalSSCALETest()],\
                       ["N", "incX"],\
                       [[4], [1]])
check_suite.add_checks([checks.ValidateInternalUFPTest(),\
                        checks.ValidateInternalUFPFTest()],\
                       ["N", "incX"],\
                       [[10], [1, 2, 4]])
check_suite.add_checks([checks.ValidateInternalDINDEXTest(),\
                        checks.ValidateInternalSINDEXTest(),\
                        checks.ValidateInternalDMINDEXTest(),\
                        checks.ValidateInternalSMINDEXTest()],\
                       ["N", "incX"],\
                       [[4], [1]])
check_suite.add_checks([checks.ValidateInternalDAMAXTest(),\
                        checks.ValidateInternalZAMAXTest(),\
                        checks.ValidateInternalSAMAXTest(),\
                        checks.ValidateInternalCAMAXTest()],\
                       ["N", "incX"],\
                       [[4095], [1, 2, 4]])
# Sweep reproducible sum/add/deposit, asum, nrm2 and dot over finite data
# distributions (constant, mountain, big-magnitude, sine), real then complex.
check_suite.add_checks([checks.ValidateInternalRDSUMTest(),\
                        checks.ValidateInternalDBDBADDTest(),\
                        checks.ValidateInternalDIDADDTest(),\
                        checks.ValidateInternalDIDDEPOSITTest(),\
                        checks.ValidateInternalRSSUMTest(),\
                        checks.ValidateInternalSBSBADDTest(),\
                        checks.ValidateInternalSISADDTest(),\
                        checks.ValidateInternalSISDEPOSITTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "FillX"],\
                       [[4095], folds, incs, [1.0, -1.0],\
                        ["constant",\
                         "mountain",\
                         "+big",\
                         "++big",\
                         "+-big",\
                         "sine"]])
check_suite.add_checks([checks.ValidateInternalRZSUMTest(),\
                        checks.ValidateInternalZBZBADDTest(),\
                        checks.ValidateInternalZIZADDTest(),\
                        checks.ValidateInternalZIZDEPOSITTest(),\
                        checks.ValidateInternalRCSUMTest(),\
                        checks.ValidateInternalCBCBADDTest(),\
                        checks.ValidateInternalCICADDTest(),\
                        checks.ValidateInternalCICDEPOSITTest()],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],\
                       [[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
                        ["constant",\
                         "mountain",\
                         "+big",\
                         "++big",\
                         "+-big",\
                         "sine"]])
check_suite.add_checks([checks.ValidateInternalRDNRM2Test(),\
                        checks.ValidateInternalRDASUMTest(),\
                        checks.ValidateInternalRSNRM2Test(),\
                        checks.ValidateInternalRSASUMTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "FillX"],\
                       [[4095], folds, incs, [1.0, -1.0],\
                        ["constant",\
                         "+big",\
                         "++big",\
                         "+-big"]])
check_suite.add_checks([checks.ValidateInternalRDZNRM2Test(),\
                        checks.ValidateInternalRDZASUMTest(),\
                        checks.ValidateInternalRSCNRM2Test(),\
                        checks.ValidateInternalRSCASUMTest(),\
                       ],\
                       ["N", "fold", "incX", ("RealScaleX", "ImagScaleX"), "FillX"],\
                       [[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
                        ["constant",\
                         "+big",\
                         "++big",\
                         "+-big"]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
                        checks.ValidateInternalRSDOTTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "RealScaleY", "FillX", "FillY"],\
                       [[4095], folds, incs, [1.0, -1.0], [1.0, -1.0],\
                        ["constant",\
                         "+big",\
                         "++big",\
                         "+-big"],\
                        ["constant",\
                         "+big",\
                         "++big",\
                         "+-big"]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
                        checks.ValidateInternalRSDOTTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "RealScaleY", ("FillX", "FillY")],\
                       [[4095], folds, incs, [1.0, -1.0], [1.0, -1.0],\
                        [("constant", "sine"),\
                         ("sine", "constant")]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
                        checks.ValidateInternalRSDOTTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "RealScaleY", ("FillX", "FillY")],\
                       [[4095], folds, incs, [1.0, -1.0], [1.0, -1.0],\
                        [("constant", "mountain"),\
                         ("mountain", "constant")]])
check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
                        checks.ValidateInternalRZDOTCTest(),\
                        checks.ValidateInternalRCDOTUTest(),\
                        checks.ValidateInternalRCDOTCTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", "FillX", "FillY"],\
                       [[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
                        ["constant",\
                         "+big",\
                         "++big",\
                         "+-big"],\
                        ["constant",\
                         "+big",\
                         "++big",\
                         "+-big"]])
check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
                        checks.ValidateInternalRZDOTCTest(),\
                        checks.ValidateInternalRCDOTUTest(),\
                        checks.ValidateInternalRCDOTCTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", ("FillX", "FillY")],\
                       [[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
                        [("constant", "sine"),\
                         ("sine", "constant")]])
check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
                        checks.ValidateInternalRZDOTCTest(),\
                        checks.ValidateInternalRCDOTUTest(),\
                        checks.ValidateInternalRCDOTCTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", ("FillX", "FillY")],\
                       [[4095], folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
                        [("constant", "mountain"),\
                         ("mountain", "constant")]])
# Exercise inf/nan propagation through the reproducible primitives, then
# cross-check dot products against XBLAS-style reference results.
check_suite.add_checks([checks.ValidateInternalRDSUMTest(),\
                        checks.ValidateInternalRDASUMTest(),\
                        checks.ValidateInternalRDNRM2Test(),\
                        checks.ValidateInternalDBDBADDTest(),\
                        checks.ValidateInternalDIDADDTest(),\
                        checks.ValidateInternalDIDDEPOSITTest(),\
                        checks.ValidateInternalRSSUMTest(),\
                        checks.ValidateInternalRSASUMTest(),\
                        checks.ValidateInternalRSNRM2Test(),\
                        checks.ValidateInternalSBSBADDTest(),\
                        checks.ValidateInternalSISADDTest(),\
                        checks.ValidateInternalSISDEPOSITTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "FillX"],\
                       [[255], inf_folds, incs, [1.0, -1.0],\
                        ["+inf",\
                         "++inf",\
                         "+-inf",\
                         "nan",\
                         "+inf_nan",\
                         "++inf_nan",\
                         "+-inf_nan"]])
check_suite.add_checks([checks.ValidateInternalRZSUMTest(),\
                        checks.ValidateInternalRDZASUMTest(),\
                        checks.ValidateInternalRDZNRM2Test(),\
                        checks.ValidateInternalZBZBADDTest(),\
                        checks.ValidateInternalZIZADDTest(),\
                        checks.ValidateInternalZIZDEPOSITTest(),\
                        checks.ValidateInternalRCSUMTest(),\
                        checks.ValidateInternalRSCASUMTest(),\
                        checks.ValidateInternalRSCNRM2Test(),\
                        checks.ValidateInternalCBCBADDTest(),\
                        checks.ValidateInternalCICADDTest(),\
                        checks.ValidateInternalCICDEPOSITTest()],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],\
                       [[255], inf_folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],\
                        ["+inf",\
                         "++inf",\
                         "+-inf",\
                         "nan",\
                         "+inf_nan",\
                         "++inf_nan",\
                         "+-inf_nan"]])
check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
                        checks.ValidateInternalRSDOTTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "RealScaleY", "FillX", "FillY"],\
                       [[255], inf_folds, incs, [1.0, -1.0], [1.0, -1.0],\
                        ["constant",\
                         "+inf",\
                         "++inf",\
                         "+-inf",\
                         "nan",\
                         "+inf_nan",\
                         "++inf_nan",\
                         "+-inf_nan"],\
                        ["constant",\
                         "+inf",\
                         "++inf",\
                         "+-inf",\
                         "nan",\
                         "+inf_nan",\
                         "++inf_nan",\
                         "+-inf_nan"]])
check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
                        checks.ValidateInternalRZDOTCTest(),\
                        checks.ValidateInternalRCDOTUTest(),\
                        checks.ValidateInternalRCDOTCTest(),\
                       ],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "RealScaleY", "ImagScaleY", "FillX", "FillY"],\
                       [[255], inf_folds, incs, [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0], [-1.0, 0.0, 1.0],
                        ["constant",\
                         "+inf",\
                         "++inf",\
                         "+-inf",\
                         "nan",\
                         "+inf_nan",\
                         "++inf_nan",\
                         "+-inf_nan"],\
                        ["constant",\
                         "+inf",\
                         "++inf",\
                         "+-inf",\
                         "nan",\
                         "+inf_nan",\
                         "++inf_nan",\
                         "+-inf_nan"]])
check_suite.add_checks([checks.ValidateXBLASRDDOTTest(),\
                        checks.ValidateXBLASRZDOTUTest(),\
                        checks.ValidateXBLASRZDOTCTest(),\
                        checks.ValidateXBLASRSDOTTest(),\
                        checks.ValidateXBLASRCDOTUTest(),\
                        checks.ValidateXBLASRCDOTCTest()],\
                       ["N", "incX", "incY", "norm"],\
                       [[1, 2, 3, 4, 5, 6, 7, 8, 15, 16, 63, 64, 4095, 4096], [1, 2, 4], [1, 2, 4], [-1, 0, 1]])
# Verify-style checks: confirm reproducibility (bitwise-identical results)
# across blockings and data orderings for sums, norms and dots.
check_suite.add_checks([checks.VerifyRDSUMTest(),\
                        checks.VerifyRDASUMTest(),\
                        checks.VerifyDBDBADDTest(),\
                        checks.VerifyDIDADDTest(),\
                        checks.VerifyDIDDEPOSITTest(),\
                        checks.VerifyRZSUMTest(),\
                        checks.VerifyRDZASUMTest(),\
                        checks.VerifyZBZBADDTest(),\
                        checks.VerifyZIZADDTest(),\
                        checks.VerifyZIZDEPOSITTest(),\
                        checks.VerifyRSSUMTest(),\
                        checks.VerifyRSASUMTest(),\
                        checks.VerifySBSBADDTest(),\
                        checks.VerifySISADDTest(),\
                        checks.VerifySISDEPOSITTest(),\
                        checks.VerifyRCSUMTest(),\
                        checks.VerifyRSCASUMTest(),\
                        checks.VerifyCBCBADDTest(),\
                        checks.VerifyCICADDTest(),\
                        checks.VerifyCICDEPOSITTest()],\
                       ["N", "fold", "B", "incX", "RealScaleX", "FillX"],\
                       [[4095], folds, [256], incs, [0],\
                        ["constant"]])
check_suite.add_checks([checks.VerifyRDSUMTest(),\
                        checks.VerifyRDASUMTest(),\
                        checks.VerifyRDNRM2Test(),\
                        checks.VerifyDIDSSQTest(),\
                        checks.VerifyDBDBADDTest(),\
                        checks.VerifyDIDADDTest(),\
                        checks.VerifyDIDDEPOSITTest(),\
                        checks.VerifyRZSUMTest(),\
                        checks.VerifyRDZASUMTest(),\
                        checks.VerifyRDZNRM2Test(),\
                        checks.VerifyDIZSSQTest(),\
                        checks.VerifyZBZBADDTest(),\
                        checks.VerifyZIZADDTest(),\
                        checks.VerifyZIZDEPOSITTest(),\
                        checks.VerifyRSSUMTest(),\
                        checks.VerifyRSASUMTest(),\
                        checks.VerifyRSNRM2Test(),\
                        checks.VerifySISSSQTest(),\
                        checks.VerifySBSBADDTest(),\
                        checks.VerifySISADDTest(),\
                        checks.VerifySISDEPOSITTest(),\
                        checks.VerifyRCSUMTest(),\
                        checks.VerifyRSCASUMTest(),\
                        checks.VerifyRSCNRM2Test(),\
                        checks.VerifySICSSQTest(),\
                        checks.VerifyCBCBADDTest(),\
                        checks.VerifyCICADDTest(),\
                        checks.VerifyCICDEPOSITTest()],\
                       ["N", "fold", "B", "incX", "FillX"],\
                       [[4095], folds, [256], incs,\
                        ["rand",\
                         "rand+(rand-1)",\
                         "sine",\
                         "small+grow*big"]])
check_suite.add_checks([checks.VerifyRDDOTTest(),\
                        checks.VerifyRZDOTUTest(),\
                        checks.VerifyRZDOTCTest(),\
                        checks.VerifyRSDOTTest(),\
                        checks.VerifyRCDOTUTest(),\
                        checks.VerifyRCDOTCTest()],\
                       ["N", "fold", "incX", "incY", "FillX", "FillY"],\
                       [[4095], folds, incs, incs,\
                        ["rand",\
                         "rand+(rand-1)",\
                         "sine",\
                         "small+grow*big"],\
                        ["rand",\
                         "rand+(rand-1)",\
                         "sine",\
                         "small+grow*big"]])
# Sweep scale factors across the whole double-precision exponent range,
# stepping one bin at a time, to probe overflow/underflow boundaries.
for i in range(DBL_BIN_WIDTH + 2):
    check_suite.add_checks([checks.ValidateInternalRDSUMTest(),\
                            checks.ValidateInternalDBDBADDTest(),\
                            checks.ValidateInternalDIDADDTest(),\
                            checks.ValidateInternalDIDDEPOSITTest(),\
                            checks.ValidateInternalRDASUMTest(),\
                            checks.ValidateInternalRDNRM2Test(),\
                            checks.ValidateInternalRDDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[8192], folds, incs, [DBL_ONES + 2 ** i],\
                            ["constant"],\
                            ["constant"]])
    check_suite.add_checks([checks.ValidateInternalRDSUMTest(),\
                            checks.ValidateInternalDBDBADDTest(),\
                            checks.ValidateInternalDIDADDTest(),\
                            checks.ValidateInternalDIDDEPOSITTest(),\
                            checks.ValidateInternalRDASUMTest(),\
                            checks.ValidateInternalRDNRM2Test(),\
                            checks.ValidateInternalRDDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[32], folds, incs,\
                            [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant"]])
    check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[32], folds, incs,\
                            [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],\
                            ["constant"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
    check_suite.add_checks([checks.ValidateInternalRDDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[32], folds, incs,\
                            [1.5 * 2**(DBL_MAX_EXP - 2 * DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - 2 * DBL_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
    check_suite.add_checks([checks.ValidateInternalRZSUMTest(),\
                            checks.ValidateInternalZBZBADDTest(),\
                            checks.ValidateInternalZIZADDTest(),\
                            checks.ValidateInternalZIZDEPOSITTest(),\
                            checks.ValidateInternalRDZASUMTest(),\
                            checks.ValidateInternalRDZNRM2Test(),\
                            checks.ValidateInternalRZDOTUTest(),\
                            checks.ValidateInternalRZDOTCTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],\
                           [[16], folds, incs,\
                            [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],\
                            [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant"]])
    check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
                            checks.ValidateInternalRZDOTCTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],\
                           [[16], folds, incs,\
                            [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],\
                            [1.5 * 2**(DBL_MAX_EXP - DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - DBL_SMALL_EXP + i)],\
                            ["constant"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
    check_suite.add_checks([checks.ValidateInternalRZDOTUTest(),\
                            checks.ValidateInternalRZDOTCTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],\
                           [[16], folds, incs,\
                            [1.5 * 2**(DBL_MAX_EXP - 2 * DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - 2 * DBL_SMALL_EXP + i)],\
                            [1.5 * 2**(DBL_MAX_EXP - 2 * DBL_BIG_EXP - 6 - i), 0.75 * 2**(DBL_MIN_EXP - 2 * DBL_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
# Same exponent-range sweep for single precision.
for i in range(FLT_BIN_WIDTH + 2):
    check_suite.add_checks([checks.ValidateInternalRSSUMTest(),\
                            checks.ValidateInternalSBSBADDTest(),\
                            checks.ValidateInternalSISADDTest(),\
                            checks.ValidateInternalSISDEPOSITTest(),\
                            checks.ValidateInternalRSASUMTest(),\
                            checks.ValidateInternalRSNRM2Test(),\
                            checks.ValidateInternalRSDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[8192], folds, incs, [FLT_ONES * 2.0 ** i],\
                            ["constant",],\
                            ["constant"]])
    check_suite.add_checks([checks.ValidateInternalRSSUMTest(),\
                            checks.ValidateInternalSBSBADDTest(),\
                            checks.ValidateInternalSISADDTest(),\
                            checks.ValidateInternalSISDEPOSITTest(),\
                            checks.ValidateInternalRSASUMTest(),\
                            checks.ValidateInternalRSNRM2Test(),\
                            checks.ValidateInternalRSDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[32], folds, incs,\
                            [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant"]])
    check_suite.add_checks([checks.ValidateInternalRSDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[32], folds, incs,\
                            [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],\
                            ["constant"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
    check_suite.add_checks([checks.ValidateInternalRSDOTTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "FillX", "FillY"],\
                           [[32], folds, incs,\
                            [1.5 * 2**(FLT_MAX_EXP - 2 * FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - 2 * FLT_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
    check_suite.add_checks([checks.ValidateInternalRCSUMTest(),\
                            checks.ValidateInternalCBCBADDTest(),\
                            checks.ValidateInternalCICADDTest(),\
                            checks.ValidateInternalCICDEPOSITTest(),\
                            checks.ValidateInternalRSCASUMTest(),\
                            checks.ValidateInternalRSCNRM2Test(),\
                            checks.ValidateInternalRCDOTUTest(),\
                            checks.ValidateInternalRCDOTCTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],\
                           [[16], folds, incs,\
                            [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],\
                            [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant"]])
    check_suite.add_checks([checks.ValidateInternalRCDOTUTest(),\
                            checks.ValidateInternalRCDOTCTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],\
                           [[16], folds, incs,\
                            [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],\
                            [1.5 * 2**(FLT_MAX_EXP - FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - FLT_SMALL_EXP + i)],\
                            ["constant"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
    check_suite.add_checks([checks.ValidateInternalRCDOTUTest(),\
                            checks.ValidateInternalRCDOTCTest(),\
                           ],\
                           ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX", "FillY"],\
                           [[16], folds, incs,\
                            [1.5 * 2**(FLT_MAX_EXP - 2 * FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - 2 * FLT_SMALL_EXP + i)],\
                            [1.5 * 2**(FLT_MAX_EXP - 2 * FLT_BIG_EXP - 6 - i), 0.75 * 2**(FLT_MIN_EXP - 2 * FLT_SMALL_EXP + i)],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"],\
                            ["constant",\
                             "+big",\
                             "++big",\
                             "+-big"]])
# Single-element inputs at the very top of the exponent range (near-max
# magnitude values) for both precisions.
check_suite.add_checks([checks.ValidateInternalDBDBADDTest(),\
                        checks.ValidateInternalDIDADDTest(),\
                        checks.ValidateInternalDIDDEPOSITTest(),\
                        checks.ValidateInternalRDSUMTest(),\
                        checks.ValidateInternalRDASUMTest(),\
                        checks.ValidateInternalRDDOTTest(),\
                        checks.ValidateInternalZBZBADDTest(),\
                        checks.ValidateInternalZIZADDTest(),\
                        checks.ValidateInternalZIZDEPOSITTest(),\
                        checks.ValidateInternalRZSUMTest(),\
                        checks.ValidateInternalRDZASUMTest(),\
                        checks.ValidateInternalRZDOTUTest(),\
                        checks.ValidateInternalRZDOTCTest()],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],\
                       [[1], folds, incs, [DBL_ONES * 2 **(DBL_MAX_EXP - 1), 1.0], [DBL_ONES * 2 **(DBL_MAX_EXP - 1), 1.0],\
                        ["constant",]])
check_suite.add_checks([checks.ValidateInternalSBSBADDTest(),\
                        checks.ValidateInternalSISADDTest(),\
                        checks.ValidateInternalSISDEPOSITTest(),\
                        checks.ValidateInternalRSSUMTest(),\
                        checks.ValidateInternalRSASUMTest(),\
                        checks.ValidateInternalRSDOTTest(),\
                        checks.ValidateInternalCBCBADDTest(),\
                        checks.ValidateInternalCICADDTest(),\
                        checks.ValidateInternalCICDEPOSITTest(),\
                        checks.ValidateInternalRCSUMTest(),\
                        checks.ValidateInternalRSCASUMTest(),\
                        checks.ValidateInternalRCDOTUTest(),\
                        checks.ValidateInternalRCDOTCTest()],\
                       ["N", "fold", "incX", "RealScaleX", "ImagScaleX", "FillX"],\
                       [[1], folds, incs, [FLT_ONES * 2 **(FLT_MAX_EXP - 1), 1.0], [FLT_ONES * 2 **(FLT_MAX_EXP - 1), 1.0],\
                        ["constant",]])
# Level-2/3 BLAS corroboration: GEMV and GEMM against reference results over
# layouts, transposes, shapes, leading dimensions and alpha/beta values.
check_suite.add_checks([checks.CorroborateRDGEMVTest(),\
                        checks.CorroborateRZGEMVTest(),\
                        checks.CorroborateRSGEMVTest(),\
                        checks.CorroborateRCGEMVTest(),\
                       ],\
                       ["O", "T", "M", "N", "lda", ("incX", "incY"), "FillA", "FillX", "FillY", ("RealAlpha", "ImagAlpha"), ("RealBeta", "ImagBeta"), "fold"],\
                       [["RowMajor", "ColMajor"], ["Trans", "NoTrans"], [255, 512], [255, 512], [0, -15], list(zip(incs, incs)),\
                        ["rand",\
                        ],\
                        ["rand",\
                        ],\
                        ["rand"],\
                        [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],\
                        [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],\
                        folds])
check_suite.add_checks([checks.CorroborateRDGEMMTest(),
                        checks.CorroborateRZGEMMTest(),\
                        checks.CorroborateRSGEMMTest(),\
                        checks.CorroborateRCGEMMTest(),\
                       ],\
                       ["O", "TransA", "TransB", "M", "N", "K", ("lda", "ldb", "ldc"), "FillA", "FillB", "FillC", ("RealAlpha", "ImagAlpha"), ("RealBeta", "ImagBeta"), "fold"],\
                       [["RowMajor", "ColMajor"], ["ConjTrans", "Trans", "NoTrans"], ["ConjTrans", "Trans", "NoTrans"], [32, 64], [32, 64], [32, 64], [(0, 0, 0), (-63, -63, -63)], \
                        ["rand",\
                        ],\
                        ["rand",\
                        ],\
                        ["rand"],\
                        [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],\
                        [(0.0, 0.0), (1.0, 0.0), (2.0, 2.0)],\
                        folds])
# Build the harness and execute every registered check suite.
check_harness = harness.Harness("check")
check_harness.add_suite(check_suite)
check_harness.run()
| 1.765625 | 2 |
elisa/linalg/mat3.py | christianbitter/Elisa | 1 | 12771671 | from __future__ import annotations
import math
from .linalg import is_numeric
from .vec3 import Point3, Vec3
# TODO: is orthogonal
# TODO: is orthonormal
# TODO: cross product
class Mat3:
"""
A 3x3 matrix, built in row-wise order. That is the values a, b, c, d, e, f, g, h, i
go into:
a | b | c
---------
d | e | f
---------
g | h | i
"""
    def __init__(self, a, b, c, d, e, f, g, h, i):
        # Flat row-major storage: row 0 is (a, b, c), row 1 (d, e, f),
        # row 2 (g, h, i).
        self._v = [a, b, c, d, e, f, g, h, i]
    @property
    def shape(self):
        """Matrix dimensions as a (rows, cols) tuple — always (3, 3)."""
        return 3, 3
def __getitem__(self, i):
if 0 <= i <= 8:
return self._v[i]
else:
raise ValueError("index wrong")
def __setitem__(self, i, v):
if v is None:
raise ValueError("v not provided")
if 0 <= i <= 8:
self._v[i] = v
else:
raise ValueError("index for assignment is outside of [0, 8]")
def __add__(self, other):
if is_numeric(other):
return self.__add__(
Mat3(other, other, other, other, other, other, other, other, other)
)
elif isinstance(other, Mat3):
return Mat3(
self._v[0] + other[0],
self._v[1] + other[1],
self._v[2] + other[2],
self._v[3] + other[3],
self._v[4] + other[4],
self._v[5] + other[5],
self._v[6] + other[6],
self._v[7] + other[7],
self._v[8] + other[8],
)
else:
raise ValueError("other has incompatible type")
def __sub__(self, other):
if is_numeric(other):
return self.__sub__(
Mat3(other, other, other, other, other, other, other, other, other)
)
elif isinstance(other, Mat3):
return Mat3(
self._v[0] - other[0],
self._v[1] - other[1],
self._v[2] - other[2],
self._v[3] - other[3],
self._v[4] - other[4],
self._v[5] - other[5],
self._v[6] - other[6],
self._v[7] - other[7],
self._v[8] - other[8],
)
else:
raise ValueError("other has incompatible type")
def __mul__(self, other):
if is_numeric(other):
return Mat3(
self._v[0] * other,
self._v[1] * other,
self._v[2] * other,
self._v[3] * other,
self._v[4] * other,
self._v[5] * other,
self._v[6] * other,
self._v[7] * other,
self._v[8] * other,
)
elif isinstance(other, Vec3):
return Vec3(
self._v[0] * other[0] + self._v[1] * other[1] + self._v[2] * other[2],
self._v[3] * other[0] + self._v[4] * other[1] + self._v[5] * other[2],
self._v[6] * other[0] + self._v[7] * other[1] + self._v[8] * other[2],
)
elif isinstance(other, Mat3):
v0, o0 = self._v[0], other[0]
v1, o1 = self._v[1], other[1]
v2, o2 = self._v[2], other[2]
v3, o3 = self._v[3], other[3]
v4, o4 = self._v[4], other[4]
v5, o5 = self._v[5], other[5]
v6, o6 = self._v[6], other[6]
v7, o7 = self._v[7], other[7]
v8, o8 = self._v[8], other[8]
return Mat3(
v0 * o0 + v1 * o3 + v2 * o6,
v0 * o1 + v1 * o4 + v2 * o7,
v0 * o2 + v1 * o5 + v2 * o8,
v3 * o0 + v4 * o3 + v5 * o6,
v3 * o1 + v4 * o4 + v5 * o7,
v3 * o2 + v4 * o5 + v5 * o8,
v6 * o0 + v7 * o3 + v8 * o6,
v6 * o1 + v7 * o4 + v8 * o7,
v6 * o2 + v7 * o5 + v8 * o8,
)
else:
raise ValueError(
"Matrix multiplication for the provided type combination not implemented"
)
def __truediv__(self, other):
if is_numeric(other):
other_inv = 1.0 / other
return self * other_inv
elif isinstance(other, Mat3):
return self * Mat3.inverse(other)
else:
raise ValueError("Divide does not support the type: {}".format(type(other)))
@property
def a(self):
return self._v[0]
@property
def b(self):
return self._v[1]
@property
def c(self):
return self._v[2]
@property
def d(self):
return self._v[3]
@property
def e(self):
return self._v[4]
@property
def f(self):
return self._v[5]
@property
def g(self):
return self._v[6]
@property
def h(self):
return self._v[7]
@property
def i(self):
return self._v[8]
@property
def trace(self) -> float:
"""Computes the trace of the matrix, i.e. the sum of diagonal elements.
Returns:
float: the trace.
"""
return self._v[0] + self._v[4] + self._v[8]
@property
def diag(self) -> list:
"""Returns the set of diagonal elements of the matrix
Returns:
list: list of three floats given by the matrix diagonal
"""
return [self._v[0], self._v[4], self._v[8]]
@property
def det(self):
return Mat3.determinant(self)
@property
def inv(self):
return Mat3.inverse(self)
@staticmethod
def inverse(m: Mat3) -> Mat3:
"""Computes the inverse of the provided matrix
Args:
m (Mat3): the matrix for which we desire to compute the inverse.
Raises:
ValueError: if the matrix is none, or the matrix's determinant is 0
Returns:
Mat3: the inverse to the provided matrix
"""
if not m:
raise ValueError("Matrix not provided")
d = m.det
if d == 0.0:
raise ValueError("Matrix cannot be inverted, determinant is 0")
d_inv = 1.0 / d
a11, a12, a13 = m.a, m.b, m.c
a21, a22, a23 = m.d, m.e, m.f
a31, a32, a33 = m.g, m.h, m.i
i11 = d_inv * (a22 * a33 - a23 * a32)
i12 = d_inv * (a13 * a32 - a12 * a33)
i13 = d_inv * (a12 * a23 - a13 * a22)
i21 = d_inv * (a23 * a31 - a21 * a33)
i22 = d_inv * (a11 * a33 - a13 * a31)
i23 = d_inv * (a13 * a21 - a11 * a23)
i31 = d_inv * (a21 * a32 - a22 * a31)
i32 = d_inv * (a12 * a31 - a11 * a32)
i33 = d_inv * (a11 * a22 - a12 * a21)
return Mat3(i11, i12, i13, i21, i22, i23, i31, i32, i33)
@staticmethod
def determinant(m: Mat3) -> float:
"""Computes the determinant of the matrix
Args:
m (Mat3): the matrix to compute the determinant for
Raises:
ValueError: if matrix is none
Returns:
float: the determinant
"""
if not m:
raise ValueError("Matrix cannot be none")
a11, a12, a13 = m.a, m.b, m.c
a21, a22, a23 = m.d, m.e, m.f
a31, a32, a33 = m.g, m.h, m.i
d = (
a11 * (a22 * a33 - a23 * a32)
- a12 * (a21 * a33 - a23 * a31)
+ a13 * (a21 * a32 - a22 * a31)
)
return d
@staticmethod
def transpose(m):
return m.t
@property
def c0(self):
return [self._v[0], self._v[3], self._v[5]]
@property
def c1(self):
return [self._v[1], self._v[4], self._v[7]]
@property
def c2(self):
return [self._v[2], self._v[5], self._v[8]]
@property
def r0(self):
return [self._v[0], self._v[1], self._v[2]]
@property
def r1(self):
return [self._v[3], self._v[4], self._v[5]]
@property
def r2(self):
return [self._v[6], self._v[7], self._v[8]]
@property
def t(self):
return Mat3(
self._v[0],
self._v[3],
self._v[6],
self._v[1],
self._v[4],
self._v[7],
self._v[2],
self._v[5],
self._v[8],
)
def __str__(self):
return """[Mat3]
[{}, {}, {}
{}, {}, {}
{}, {}, {}]""".format(
*self._v
)
# Commonly used constant matrices.
zero3 = Mat3(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)  # all-zero matrix
one3 = Mat3(1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)  # all-ones matrix
eye3 = Mat3(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0)  # 3x3 identity matrix
def translate2D(x: float, y: float) -> Mat3:
    """Build the homogeneous 2D translation matrix for the offset (x, y)."""
    return Mat3(
        1.0, 0.0, x,
        0.0, 1.0, y,
        0.0, 0.0, 1.0,
    )
| 3.1875 | 3 |
starter_kit/lib/engine.py | ido777/Skillz | 0 | 12771672 | #!/usr/bin/env python
from __future__ import print_function
import time
import traceback
import os
import base64
import random
import sys
import json
import subprocess
from os import walk
from os.path import splitext, join
import io
# Python 3 compatibility shim: `unicode` no longer exists there, so define a
# no-op replacement (all str objects are already text on Python 3).
if sys.version_info >= (3,):
    def unicode(s):
        return s
from sandbox import get_sandbox
class HeadTail(object):
    """Tee writes through to an underlying file (if any) while keeping only
    the first and the last ``max_capture`` characters of everything written."""

    def __init__(self, file, max_capture=510):
        self.file = file
        self.max_capture = max_capture
        self.capture_head_len = 0
        self.capture_head = unicode('')
        self.capture_tail = unicode('')

    def write(self, data):
        # Forward to the wrapped file first, then update the head/tail capture.
        if self.file:
            self.file.write(data)
        room = self.max_capture - self.capture_head_len
        if room <= 0:
            # Head buffer is full: everything rolls through the tail buffer.
            self.capture_tail = (self.capture_tail + data)[-self.max_capture:]
            return
        if len(data) <= room:
            self.capture_head += data
            self.capture_head_len += len(data)
        else:
            # Split: fill the head to capacity, overflow goes to the tail.
            self.capture_head += data[:room]
            self.capture_head_len = self.max_capture
            self.capture_tail = (self.capture_tail + data[room:])[-self.max_capture:]

    def flush(self):
        if self.file:
            self.file.flush()

    def close(self):
        if self.file:
            self.file.close()

    def head(self):
        return self.capture_head

    def tail(self):
        return self.capture_tail

    def headtail(self):
        # Join head and tail with a ".." marker only when both are non-empty.
        if self.capture_head != '' and self.capture_tail != '':
            sep = unicode('\n..\n')
        else:
            sep = unicode('')
        return self.capture_head + sep + self.capture_tail
def run_game(game, botcmds, options):
    """Run a full game between the given bots inside sandboxes.

    game     -- game-engine object driving the rules (state, moves, scoring)
    botcmds  -- list of (working_dir, bot_path, bot_name) tuples, one per bot
    options  -- dict of run configuration: turn counts, time limits, log file
                descriptors, replay settings, etc.

    Returns a result dict with scores, statuses, per-bot turn counts and
    (optionally) replay data; on failure returns ``{'error': traceback}``.
    """
    # file descriptors for replay and streaming formats
    replay_log = options.get('replay_log', None)
    stream_log = options.get('stream_log', None)
    verbose_log = options.get('verbose_log', None)
    debug_log = options.get('debug_log', None)
    debug_in_replay = options.get('debug_in_replay', None)
    debug_max_length = options.get('debug_max_length', None)
    debug_max_count = options.get('debug_max_count', None)
    # file descriptors for bots, should be list matching # of bots
    input_logs = options.get('input_logs', [None]*len(botcmds))
    output_logs = options.get('output_logs', [None]*len(botcmds))
    error_logs = options.get('error_logs', [None]*len(botcmds))
    capture_errors = options.get('capture_errors', False)
    capture_errors_max = options.get('capture_errors_max', 510)
    # time limits are given in milliseconds; convert to seconds
    turns = int(options['turns'])
    loadtime = float(options['loadtime']) / 1000
    turntime = float(options['turntime']) / 1000
    strict = options.get('strict', False)
    end_wait = options.get('end_wait', 0.0)
    location = options.get('location', 'localhost')
    game_id = options.get('game_id', 0)
    error = ''
    bots = []
    bot_status = []
    bot_turns = []
    # per-bot debug message bookkeeping for the replay
    debug_msgs = [[] for _ in range(len(botcmds))]
    debug_msgs_length = [0 for _ in range(len(botcmds))]
    debug_msgs_count = [0 for _ in range(len(botcmds))]
    debug_msgs_exceeded = [False for _ in range(len(botcmds))]
    #helper function to add messages for replay data
    def add_debug_messages(bot_index, turn, level, messages):
        if (not debug_in_replay) or len(messages) == 0:
            return
        # In order to calculate this only if we not already exceeded
        if not debug_msgs_exceeded[bot_index]:
            messages_size = sum(map(lambda m: len(m), messages))
            debug_msgs_length[bot_index] += messages_size
            debug_msgs_count[bot_index] += len(messages)
            if (debug_msgs_count[bot_index] > debug_max_count) or (
                debug_msgs_length[bot_index] > debug_max_length):
                # update the calculated exceeded
                debug_msgs_exceeded[bot_index] = True
        if (debug_msgs_exceeded[bot_index]):
            debug_msgs[bot_index].append([turn, 2, ["Exceeded debug messages limit."]])
            if error_logs and error_logs[bot_index]:
                error_logs[bot_index].write("Exceeded debug messages limit.\n")
        else:
            debug_msgs[bot_index].append([turn, level, messages])
    if capture_errors:
        # wrap error logs so only a bounded head/tail of stderr is kept
        error_logs = [HeadTail(log, capture_errors_max) for log in error_logs]
    try:
        # TODO: where did this come from?? do we need it??
        # Start each bot in its own sandbox.
        for b, bot in enumerate(botcmds):
            # this struct is given to us from the playgame.py file
            bot_cwd, bot_path, bot_name = bot
            # generate the appropriate command from file extension
            bot_cmd = generate_cmd(bot_path)
            # generate the sandbox from the bot working directory
            sandbox = get_sandbox(bot_cwd, protected_files=[bot_path], secure=options.get('secure_jail', None))
            if bot_cmd:
                sandbox.start(bot_cmd)
            bots.append(sandbox)
            bot_status.append('alive')
            bot_turns.append(0)
            # ensure it started
            if not sandbox.is_alive:
                bot_status[-1] = 'crashed 0'
                bot_turns[-1] = 0
                if verbose_log:
                    verbose_log.write('bot %s did not start\n' % bot_name)
                game.kill_player(b)
            sandbox.pause()
            if not bot_cmd:
                # couldnt generate bot command - couldnt recognize the language of the code
                add_debug_messages(b, 0, 2, ["Couldnt recognize code language. Are you sure code files are correct?"])
        if stream_log:
            # stream the start info - including non-player info
            stream_log.write(game.get_player_start())
            stream_log.flush()
        if verbose_log:
            verbose_log.write('running for %s turns\n' % turns)
        # Main game loop: turn 0 is the setup/load turn, turns 1..N play out.
        for turn in range(turns+1):
            if turn == 0:
                game.start_game()
            # send game state to each player
            for b, bot in enumerate(bots):
                if game.is_alive(b):
                    if turn == 0:
                        start = game.get_player_start(b) + 'ready\n'
                        bot.write(start)
                        if input_logs and input_logs[b]:
                            input_logs[b].write(start)
                            input_logs[b].flush()
                    else:
                        state = 'turn ' + str(turn) + '\n' + game.get_player_state(b) + 'go\n'
                        bot.write(state)
                        if input_logs and input_logs[b]:
                            input_logs[b].write(state)
                            input_logs[b].flush()
                        bot_turns[b] = turn
            if turn > 0:
                if stream_log:
                    stream_log.write('turn %s\n' % turn)
                    stream_log.write('score %s\n' % ' '.join([str(s) for s in game.get_scores()]))
                    stream_log.write(game.get_state())
                    stream_log.flush()
                game.start_turn()
            # get moves from each player
            # turn 0 uses the (longer) load time; turn 1 is padded for warm-up
            if turn == 0:
                time_limit = loadtime
            elif turn == 1:
                time_limit = max([turntime * 10, 1.500])
            else:
                time_limit = turntime
            if options.get('serial', False):
                simul_num = int(options['serial']) # int(True) is 1
            else:
                simul_num = len(bots)
            bot_moves = [[] for b in bots]
            error_lines = [[] for b in bots]
            statuses = [None for b in bots]
            bot_list = [(b, bot) for b, bot in enumerate(bots)
                        if game.is_alive(b)]
            #random.shuffle(bot_list)
            # poll bots in groups of simul_num at a time
            for group_num in range(0, len(bot_list), simul_num):
                pnums, pbots = zip(*bot_list[group_num:group_num + simul_num])
                # get the moves from each bot
                moves, errors, status = get_moves(game, pbots, pnums,
                        time_limit, turn)
                for p, b in enumerate(pnums):
                    bot_moves[b] = moves[p]
                    error_lines[b] = errors[p]
                    statuses[b] = status[p]
            # print debug messages from bots
            if debug_log:
                for b, moves in enumerate(bot_moves):
                    bot_name = botcmds[b][2]
                    messages = []
                    for move in moves:
                        if not move.startswith('m'):
                            # break since messages come only before orders
                            break
                        messages.append(base64.b64decode(move.split(' ')[1]))
                    if messages:
                        debug_log.write('turn %4d bot %s Debug prints:\n' % (turn, bot_name))
                        debug_log.write('Debug>> ' + '\nDebug>> '.join(messages)+'\n')
                        add_debug_messages(b, turn, 0, messages)
            # handle any logs that get_moves produced
            for b, errors in enumerate(error_lines):
                if errors:
                    if error_logs and error_logs[b]:
                        error_logs[b].write(unicode('\n').join(errors)+unicode('\n'))
                    add_debug_messages(b, turn, 2, [unicode('\n').join(errors)+unicode('\n')])
            # set status for timeouts and crashes
            for b, status in enumerate(statuses):
                if status != None:
                    bot_status[b] = status
                    bot_turns[b] = turn
            # process all moves
            bot_alive = [game.is_alive(b) for b in range(len(bots))]
            if turn > 0 and not game.game_over():
                for b, moves in enumerate(bot_moves):
                    valid, ignored, invalid = game.do_moves(b, moves)
                    bot_name = botcmds[b][2]
                    if output_logs and output_logs[b]:
                        output_logs[b].write('# turn %s\n' % turn)
                    if valid:
                        if output_logs and output_logs[b]:
                            output_logs[b].write('\n'.join(valid)+'\n')
                            output_logs[b].flush()
                    if ignored:
                        if error_logs and error_logs[b]:
                            error_logs[b].write('turn %4d bot %s ignored actions:\n' % (turn, bot_name))
                            error_logs[b].write('\n'.join(ignored)+'\n')
                            error_logs[b].flush()
                        if output_logs and output_logs[b]:
                            output_logs[b].write('\n'.join(ignored)+'\n')
                            output_logs[b].flush()
                        add_debug_messages(b, turn, 1, ignored)
                    if invalid:
                        # in strict mode an invalid move eliminates the bot
                        if strict:
                            game.kill_player(b)
                            bot_status[b] = 'invalid'
                            bot_turns[b] = turn
                        if error_logs and error_logs[b]:
                            error_logs[b].write('turn %4d bot [%s] invalid actions:\n' % (turn, bot_name))
                            error_logs[b].write('\n'.join(invalid)+'\n')
                            error_logs[b].flush()
                        if output_logs and output_logs[b]:
                            output_logs[b].write('\n'.join(invalid)+'\n')
                            output_logs[b].flush()
                        add_debug_messages(b, turn, 1, invalid)
            if turn > 0:
                game.finish_turn()
            # send ending info to eliminated bots
            bots_eliminated = []
            for b, alive in enumerate(bot_alive):
                if alive and not game.is_alive(b):
                    bots_eliminated.append(b)
            for b in bots_eliminated:
                if verbose_log:
                    # NOTE(review): bot_name here is left over from an earlier
                    # loop and may not correspond to bot b — confirm intent.
                    verbose_log.write('turn %4d bot %s defeated\n' % (turn, bot_name))
                if bot_status[b] == 'alive': # could be invalid move
                    bot_status[b] = 'defeated'
                    bot_turns[b] = turn
                score_line ='score %s\n' % ' '.join([str(s) for s in game.get_scores(b)])
                status_line = 'status %s\n' % ' '.join(map(str, game.order_for_player(b, bot_status)))
                status_line += 'playerturns %s\n' % ' '.join(map(str, game.order_for_player(b, bot_turns)))
                end_line = 'end\nplayers %s\n' % len(bots) + score_line + status_line
                state = end_line + game.get_player_state(b) + 'go\n'
                bots[b].write(state)
                if input_logs and input_logs[b]:
                    input_logs[b].write(state)
                    input_logs[b].flush()
                if end_wait:
                    bots[b].resume()
            if bots_eliminated and end_wait:
                if verbose_log:
                    verbose_log.write('waiting {0} seconds for bots to process end turn\n'.format(end_wait))
                time.sleep(end_wait)
            for b in bots_eliminated:
                bots[b].kill()
            # with verbose log we want to display the following <pirateCount> <islandCount> <Ranking/leading> <scores>
            if verbose_log:
                stats = game.get_stats()
                stat_keys = sorted(stats.keys())
                s = 'turn %4d stats: ' % turn
                if turn % 50 == 0:
                    # repeat the column headers every 50 turns
                    verbose_log.write(' '*len(s))
                    for key in stat_keys:
                        values = stats[key]
                        verbose_log.write(' {0:^{1}}'.format(key, max(len(key), len(str(values)))))
                    verbose_log.write('\n')
                verbose_log.write(s)
                for key in stat_keys:
                    values = stats[key]
                    if type(values) == list:
                        values = '[' + ','.join(map(str,values)) + ']'
                    verbose_log.write(' {0:^{1}}'.format(values, max(len(key), len(str(values)))))
                verbose_log.write('\n')
            else:
                # no verbose log - print progress every 20 turns
                if turn % 20 == 0:
                    turn_prompt = "turn #%d of max %d\n" % (turn,turns)
                    sys.stdout.write(turn_prompt)
            #alive = [game.is_alive(b) for b in range(len(bots))]
            #if sum(alive) <= 1:
            if game.game_over():
                break
        # send bots final state and score, output to replay file
        game.finish_game()
        score_line ='score %s\n' % ' '.join(map(str, game.get_scores()))
        status_line = ''
        if game.get_winner() and len(game.get_winner()) == 1:
            winner = game.get_winner()[0]
            winner_line = 'player %s [%s] is the Winner!\n' % (winner + 1, botcmds[winner][2])
        else:
            winner_line = 'Game finished at a tie - there is no winner'
        status_line += winner_line
        end_line = 'end\nplayers %s\n' % len(bots) + score_line + status_line
        if stream_log:
            stream_log.write(end_line)
            stream_log.write(game.get_state())
            stream_log.flush()
        if verbose_log:
            verbose_log.write(score_line)
            verbose_log.write(status_line)
            verbose_log.flush()
        else:
            sys.stdout.write(score_line)
            sys.stdout.write(status_line)
        # tell every surviving bot the game ended and give it the final state
        for b, bot in enumerate(bots):
            if game.is_alive(b):
                score_line ='score %s\n' % ' '.join([str(s) for s in game.get_scores(b)])
                status_line = 'status %s\n' % ' '.join(map(str, game.order_for_player(b, bot_status)))
                status_line += 'playerturns %s\n' % ' '.join(map(str, game.order_for_player(b, bot_turns)))
                end_line = 'end\nplayers %s\n' % len(bots) + score_line + status_line
                state = end_line + game.get_player_state(b) + 'go\n'
                bot.write(state)
                if input_logs and input_logs[b]:
                    input_logs[b].write(state)
                    input_logs[b].flush()
    except Exception as e:
        # TODO: sanitize error output, tracebacks shouldn't be sent to workers
        error = traceback.format_exc()
        sys.stderr.write('Error Occurred\n')
        sys.stderr.write(str(e) + '\n')
        if verbose_log:
            verbose_log.write(error)
        # error = str(e)
    finally:
        # always tear the sandboxes down, even on error
        if end_wait:
            for bot in bots:
                bot.resume()
            if verbose_log and end_wait > 1:
                verbose_log.write('waiting {0} seconds for bots to process end turn\n'.format(end_wait))
            time.sleep(end_wait)
        for bot in bots:
            if bot.is_alive:
                bot.kill()
            bot.release()
    if error:
        game_result = { 'error': error }
    else:
        # assemble the structured result consumed by the caller / replay viewer
        scores = game.get_scores()
        game_result = {
            'challenge': game.__class__.__name__.lower(),
            'location': location,
            'game_id': game_id,
            'status': bot_status,
            'playerturns': bot_turns,
            'score': scores,
            'winner_names': [botcmds[win][2] for win in game.get_winner()],
            'rank': [sorted(scores, reverse=True).index(x) for x in scores],
            'replayformat': 'json',
            'replaydata': game.get_replay(),
            'game_length': turn,
            'debug_messages': debug_msgs,
        }
        if capture_errors:
            game_result['errors'] = [head.headtail() for head in error_logs]
    if replay_log:
        json.dump(game_result, replay_log, sort_keys=True)
    return game_result
def get_moves(game, bots, bot_nums, time_limit, turn):
    """Collect one turn's worth of moves from the given bots.

    bots      -- sandbox objects for the bots to poll this round
    bot_nums  -- the players' indices in the game (parallel to ``bots``)
    time_limit -- seconds the bots collectively get to answer
    turn      -- current turn number (used only for log messages)

    Returns (bot_moves, error_lines, statuses), all parallel to ``bots``:
    moves read until a 'go' line, captured stderr lines, and a status of
    None / 'crashed' / 'timeout' per bot.
    """
    bot_finished = [not game.is_alive(bot_nums[b]) for b in range(len(bots))]
    bot_moves = [[] for b in bots]
    error_lines = [[] for b in bots]
    statuses = [None for b in bots]
    # resume all bots
    for bot in bots:
        if bot.is_alive:
            bot.resume()
    # don't start timing until the bots are started
    start_time = time.time()
    # loop until received all bots send moves or are dead
    # or when time is up
    while (sum(bot_finished) < len(bot_finished) and
            time.time() - start_time < time_limit):
        time.sleep(0.003)
        for b, bot in enumerate(bots):
            if bot_finished[b]:
                continue # already got bot moves
            if not bot.is_alive:
                error_lines[b].append(unicode('turn %4d bot %s crashed') % (turn, bot_nums[b]))
                statuses[b] = 'crashed'
                line = bot.read_error()
                while line != None:
                    error_lines[b].append(line)
                    line = bot.read_error()
                bot_finished[b] = True
                game.kill_player(bot_nums[b])
                continue # bot is dead
            # read a maximum of 100 lines per iteration
            for x in range(100):
                line = bot.read_line()
                if line is None:
                    # stil waiting for more data
                    break
                line = line.strip()
                if line.lower() == 'go':
                    bot_finished[b] = True
                    # bot finished sending data for this turn
                    break
                bot_moves[b].append(line)
            for x in range(100):
                line = bot.read_error()
                if line is None:
                    break
                error_lines[b].append(line)
    # pause all bots again
    for bot in bots:
        if bot.is_alive:
            bot.pause()
    # check for any final output from bots
    for b, bot in enumerate(bots):
        if bot_finished[b]:
            continue # already got bot moves
        if not bot.is_alive:
            error_lines[b].append(unicode('turn %4d bot %s crashed') % (turn, bot_nums[b]))
            statuses[b] = 'crashed'
            line = bot.read_error()
            while line != None:
                error_lines[b].append(line)
                line = bot.read_error()
            bot_finished[b] = True
            game.kill_player(bot_nums[b])
            continue # bot is dead
        # drain remaining stdout, capped at 40000 moves to bound memory
        line = bot.read_line()
        while line is not None and len(bot_moves[b]) < 40000:
            line = line.strip()
            if line.lower() == 'go':
                bot_finished[b] = True
                # bot finished sending data for this turn
                break
            bot_moves[b].append(line)
            line = bot.read_line()
        # drain remaining stderr, capped at 1000 lines
        line = bot.read_error()
        while line is not None and len(error_lines[b]) < 1000:
            error_lines[b].append(line)
            line = bot.read_error()
    # kill timed out bots
    for b, finished in enumerate(bot_finished):
        if not finished:
            error_lines[b].append(unicode('turn %4d bot %s timed out') % (turn, bot_nums[b]))
            statuses[b] = 'timeout'
            bot = bots[b]
            for x in range(100):
                line = bot.read_error()
                if line is None:
                    break
                error_lines[b].append(line)
            game.kill_player(bot_nums[b])
            bots[b].kill()
    return bot_moves, error_lines, statuses
def get_java_path():
    """Return a command/path for the Java executable.

    On non-Windows systems this is simply 'java' (assumed on the PATH).
    On Windows the standard Program Files java directories are scanned for a
    JDK 1.6/1.7/1.8 install; falls back to 'java' when none is found.
    """
    if os.name != "nt":
        return 'java'
    # TODO: search path as well!
    # TODO: actually run os.system('java -version') to see version
    candidates = []
    for base in ("C:\\Program Files\\java", "C:\\Program Files (x86)\\java"):
        if os.path.exists(base):
            candidates.extend(os.path.join(base, entry) for entry in os.listdir(base))
    # Walk the list backwards so higher versions (later entries) win.
    for candidate in reversed(candidates):
        if 'jdk' in candidate.lower() and any(ver in candidate for ver in ('1.6', '1.7', '1.8')):
            return os.path.join(candidate, "bin", "java.exe")
    print("Cannot find path of Java JDK version 1.6 or over!")
    # we should really quit but since we dont yet search path - first try default
    return 'java'
def get_dot_net_version():
    # Placeholder: .NET runtime detection is not implemented yet.
    pass
def select_files(root, files, suffix):
    """Return the full paths (under *root*) of the entries in *files* whose
    extension equals *suffix* (e.g. '.py')."""
    return [join(root, name) for name in files if splitext(name)[1] == suffix]
def build_recursive_dir_tree(path, suffix):
    """Recursively collect every file under *path* whose extension equals *suffix*.

    path -- directory where the folder scan begins
    """
    matches = []
    for root, _dirs, files in walk(path):
        matches.extend(select_files(root, files, suffix))
    return matches
def recognize_language(bot_path):
    """Guess the bot's implementation language: 'python', 'csh' or 'java'.

    For a single file the extension decides. For a directory, the language
    with the most source files wins (ties resolved java > csh > python).
    Returns None when nothing is recognized.
    """
    # Single-file case: decide purely by extension.
    if not os.path.isdir(bot_path):
        if bot_path.endswith(('.py', '.pyc')):
            return 'python'
        if bot_path.endswith('.cs'):
            return 'csh'
        if bot_path.endswith('.java'):
            return 'java'
        return None
    # Directory case: count source files per language.
    counts = {
        'java': len(build_recursive_dir_tree(bot_path, '.java')),
        'csh': len(build_recursive_dir_tree(bot_path, '.cs')),
        'python': len(build_recursive_dir_tree(bot_path, '.py')),
    }
    best = max(counts.values())
    if best == 0:
        return None
    for lang in ('java', 'csh', 'python'):
        if counts[lang] == best:
            return lang
def generate_cmd(bot_path):
    """Build the shell command string that runs the bot at *bot_path* through
    the matching language runner; returns '' when the language is unknown."""
    runner_dir = os.path.dirname(__file__)
    lang = recognize_language(bot_path)
    if lang == 'python':
        runner = os.path.join(runner_dir, "pythonRunner.py")
        return 'python "%s" "%s"' % (runner, bot_path)
    if lang == 'csh':
        runner = os.path.join(runner_dir, "cshRunner.exe")
        # Run with Mono if Unix. But in the future just receive source code (.cs) and compile on the fly
        if os.name == "nt":
            return '"%s" "%s"' % (runner, bot_path)
        return 'mono --debug %s %s' % (runner, bot_path)
    if lang == 'java':
        runner = os.path.join(runner_dir, "javaRunner.jar")
        return '"%s" -jar "%s" "%s"' % (get_java_path(), runner, bot_path)
    # Unrecognized language: explain why and fall through to an empty command.
    if os.path.isdir(bot_path):
        sys.stdout.write('Couldnt find code in folder! %s\n' % (bot_path))
    else:
        sys.stdout.write('Unknown file format! %s\nPlease give file that ends with .cs , .java or .py\n' % (bot_path))
    return ''
| 2.40625 | 2 |
tests/gpu/test_gpuargs.py | gnafit/gna | 5 | 12771673 | #!/usr/bin/env python
"""Check Identity class"""
from matplotlib import pyplot as plt
import numpy as N
from load import ROOT as R
from matplotlib.ticker import MaxNLocator
from gna import constructors as C
from gna.bindings import DataType
from gna.unittest import *
from gna import context
#
# Create the matrix
#
def test_io(opts):
    """Feed a constant matrix through an Identity transformation (host
    GPU-args variant) and check that the output equals the input."""
    print('Test inputs/outputs (Identity)')
    in_mat = N.arange(12, dtype='d').reshape(3, 4)
    print('Input matrix (numpy)')
    print(in_mat)
    print()
    # Wire a Points source into the Identity transformation.
    points = C.Points(in_mat)
    identity = R.Identity()
    identity.identity.switchFunction('identity_gpuargs_h')
    points.points.points >> identity.identity.source
    identity.print()
    out = identity.identity.target.data()
    out_dtype = identity.identity.target.datatype()
    assert N.allclose(in_mat, out), "C++ and Python results doesn't match"
    # Dump diagnostics.
    print('Eigen dump (C++)')
    identity.dump()
    print()
    print('Result (C++ Data to numpy)')
    print(out)
    print()
    print('Datatype:', str(out_dtype))
def gpuargs_make(nsname, mat1, mat2):
    """Create a Dummy transformation bound to three fixed parameters plus two
    Points sources; returns (dummy, points1, points2, namespace)."""
    from gna.env import env
    ns = env.globalns(nsname)
    # Register three fixed parameters in the namespace.
    for idx, central in ((1, 1.0), (2, 1.5), (3, 1.01e5)):
        ns.reqparameter('par%d' % idx, central=central, fixed=True,
                        label='Dummy parameter %d' % idx)
    ns.printparameters(labels=True)
    points1, points2 = C.Points(mat1), C.Points(mat2)
    # The Dummy must be created while the namespace is active so it binds the parameters.
    with ns:
        dummy = C.Dummy(4, "dummy", ['par1', 'par2', 'par3'])
    return dummy, points1, points2, ns
@floatcopy(globals(), addname=True)
def test_vars_01_local(opts, function_name):
    """Exercise Dummy with two inputs/outputs and three parameters using the
    'dummy_gpuargs_h_local' function; outputs are checked against 0 and 1."""
    print('Test inputs/outputs/variables (Dummy)')
    mat1 = N.arange(12, dtype='d').reshape(3, 4)
    mat2 = N.arange(15, dtype='d').reshape(5, 3)
    dummy, points1, points2, ns = gpuargs_make(function_name, mat1, mat2)
    dummy.dummy.switchFunction('dummy_gpuargs_h_local')
    # Attach two inputs and two outputs to the Dummy transformation.
    dummy.add_input(points1, 'input1')
    dummy.add_input(points2, 'input2')
    dummy.add_output('out1')
    dummy.add_output('out2')
    dummy.print()
    res1 = dummy.dummy.out1.data()
    res2 = dummy.dummy.out2.data()
    dt1 = dummy.dummy.out1.datatype()
    dt2 = dummy.dummy.out2.datatype()
    assert N.allclose(res1, 0.0), "C++ and Python results doesn't match"
    assert N.allclose(res2, 1.0), "C++ and Python results doesn't match"
    print( 'Result (C++ Data to numpy)' )
    print( res1 )
    print( res2 )
    print()
    print( 'Datatype:', str(dt1) )
    print( 'Datatype:', str(dt2) )
    # Change a parameter and re-read the output to trigger re-evaluation.
    print('Change 3d variable')
    ns['par3'].set(-1.0)
    res1 = dummy.dummy.out1.data()
@floatcopy(globals(), addname=True)
def test_vars_02(opts, function_name):
    """Same as test_vars_01_local but with variables registered through a
    context manager and the non-local 'dummy_gpuargs_h' function."""
    print('Test inputs/outputs/variables (Dummy)')
    mat1 = N.arange(12, dtype='d').reshape(3, 4)
    mat2 = N.arange(15, dtype='d').reshape(5, 3)
    # Allocate variable storage via the context manager (100 slots) and hand
    # all namespace variables to it.
    with context.manager(100) as manager:
        dummy, points1, points2, ns = gpuargs_make(function_name, mat1, mat2)
        manager.setVariables(C.stdvector([par.getVariable() for (name, par) in ns.walknames()]))
    dummy.dummy.switchFunction('dummy_gpuargs_h')
    dummy.add_input(points1, 'input1')
    dummy.add_input(points2, 'input2')
    dummy.add_output('out1')
    dummy.add_output('out2')
    dummy.print()
    res1 = dummy.dummy.out1.data()
    res2 = dummy.dummy.out2.data()
    dt1 = dummy.dummy.out1.datatype()
    dt2 = dummy.dummy.out2.datatype()
    assert N.allclose(res1, 0.0), "C++ and Python results doesn't match"
    assert N.allclose(res2, 1.0), "C++ and Python results doesn't match"
    print( 'Result (C++ Data to numpy)' )
    print( res1 )
    print( res2 )
    print()
    print( 'Datatype:', str(dt1) )
    print( 'Datatype:', str(dt2) )
    # Change a parameter and re-read the output to trigger re-evaluation.
    print('Change 3d variable')
    ns['par3'].set(-1.0)
    res1 = dummy.dummy.out1.data()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
# parser.add_argument('-g', '--gpuargs', action='store_true')
run_unittests(globals(), parser.parse_args())
| 2.484375 | 2 |
hcap/views/my_capacity_notifications_view_set.py | fabiommendes/capacidade_hospitalar | 0 | 12771674 | from django.utils.translation import gettext_lazy as _
from hcap_notifications.models import HealthcareUnitCapacity
from hcap_utils.contrib.material.viewsets import ModelViewSet
class MyCapacityNotificationsViewSet(ModelViewSet):
    """Read-only listing of the capacity notifications reported by the
    requesting user; add/change/delete are always denied."""

    model = HealthcareUnitCapacity
    label = "hcap"
    name = "my_capacity_notifications"
    list_display = ("healthcare_unit", "date", "total_clinical_beds", "total_icu_beds")
    ordering = ("healthcare_unit", "-date")

    def get_queryset(self, request):
        # Restrict to notifications whose notifier belongs to the current user.
        return self.model.objects.filter(notifier__user=request.user)

    def has_add_permission(self, request):
        # Notifications are never created through this view.
        return False

    def has_view_permission(self, request, obj=None):
        user = request.user
        if user is None:
            return False
        # Only authenticated notifier accounts may view.
        return user.is_authenticated and user.is_notifier

    def has_change_permission(self, request, obj=None):
        return False

    def has_delete_permission(self, request, obj=None):
        return False
| 2.15625 | 2 |
py_algo/basics/implementation/binary_movement.py | Sk0uF/Algorithms | 1 | 12771675 | """
Codemonk link: https://www.hackerearth.com/practice/basic-programming/implementation/basics-of-implementation/practice-problems/algorithm/binary-movement/
You are given a bit array (0 and 1) of size n. Your task is to perform Q queries. In each query you have to toggle all
the bits from the index L to R (L and R inclusive). After performing all the queries, print the count of all the set
bits and the newly updated array.
Input - Output:
The first line contains an integer N denoting the size of the array.
The Second line contains N space-separated binary numbers.
The third line contains Q denoting the number of queries.
The next Q lines contain L and R for each ith query.
Print the count of all the set bits and newly updated array in the new line.
Sample input:
6
1 0 1 1 0 1
3
1 3
4 5
2 5
Sample Output:
3
0 0 1 1 0 1
"""
"""
The problem can be translated to the following: to find the final value of each index, count how many queries cover that
index, i.e. start at or before it and end at or after it. If, overall, an odd number of queries
cover the index, its bit is toggled; otherwise it keeps its original value.
O(N) for the first and second "for".
Final complexity: O(2*N) => O(N)
"""
inp_len = int(input())
bit_list = list(map(int, input().rstrip().split()))
q_len = int(input())
# Creating 2 supplementary arrays.
count_queries_before = [0] * inp_len
count_queries_after = [0] * inp_len
count = 0
count_ones = 0
for i in range(0, q_len):
rl = list(map(int, input().rstrip().split()))
# The first array contains the starting positions of all the queries.
# The second array contains the ending positions of all the queries.
count_queries_before[rl[0]-1] += 1
count_queries_after[rl[1]-1] += 1
count += count_queries_before[0]
if count % 2 != 0:
if bit_list[0] == 0:
bit_list[0] = 1
else:
bit_list[0] = 0
for i in range(1, inp_len):
# For each next index,
# add the amount of of queries starting from there and
# subtract the amount of queries ending 1 index before.
count += count_queries_before[i]
count -= count_queries_after[i-1]
if count % 2 != 0:
if bit_list[i] == 0:
bit_list[i] = 1
else:
bit_list[i] = 0
count_ones += bit_list[i]
print(count_ones)
print(*bit_list)
| 3.84375 | 4 |
src/twitter.py | shivchander/political-alignment-prediction | 1 | 12771676 | <gh_stars>1-10
import tweepy
from src.keys import keys
from src.bacon import Frontier, safe_lookup_users
__author__ = '<NAME>'
class Tweet:
    def __init__(self, user_handle):
        """Set up an authenticated tweepy client for the given user.

        :param user_handle: twitter username without '@' symbol
        """
        # OAuth credentials loaded from the project's keys module.
        self._consumer_key = keys['consumer_key']
        self._consumer_secret = keys['consumer_secret']
        self._access_token = keys['access_token']
        self._access_token_secret = keys['access_token_secret']
        # configure OAUTH
        self.auth = tweepy.OAuthHandler(self._consumer_key, self._consumer_secret)
        self.auth.set_access_token(self._access_token, self._access_token_secret)
        # set up tweepy client
        self.api = tweepy.API(
            self.auth,
            wait_on_rate_limit=True,
            wait_on_rate_limit_notify=True,
            timeout=60,
            compression=True
        )
        self.user_handle = user_handle
        # politicians' screennames for bacon number
        self._dem_usrs = ['BernieSanders', 'AOC', 'JoeBiden']
        self._rep_usrs = ['realDonaldTrump', 'VP', 'GOP']
def get_friends(self):
"""
:return: array containing the IDs of users being followed by self.
"""
try:
# get friends ids
friends_ids = []
for friend in tweepy.Cursor(self.api.friends_ids, screen_name=self.user_handle).pages():
friends_ids.append(friend)
# get twitter handles
friends_handles = [user.screen_name for user in self.api.lookup_users(user_ids=friends_ids)]
return friends_handles
except tweepy.TweepError:
print('Oops somethings not right, good luck figuring out what')
return []
def get_followers(self):
"""
:return: array containing the IDs of users following self.
"""
try:
# get friends ids
followers_ids = []
for follower in tweepy.Cursor(self.api.followers_ids, id=self.user_handle).pages():
followers_ids.append(follower)
# get twitter handles
followers_handles = [user.screen_name for user in self.api.lookup_users(user_ids=followers_ids)]
return followers_handles
except tweepy.TweepError:
print('Oops somethings not right, good luck figuring out what')
return []
def get_tweets(self, limit=100):
"""
:param limit: max limit of tweets
:return: array containing the tweets from self.user_handle
"""
try:
tweets = []
for obj in tweepy.Cursor(self.api.user_timeline, screen_name=self.user_handle,
include_rts=False, tweet_mode='extended').items(limit):
if len(tweets) < limit:
tweets.append(obj.full_text)
else:
break
return tweets
except tweepy.TweepError:
print('Oops somethings not right, good luck figuring out what')
return []
def get_retweets(self, limit=100):
"""
:param limit: max limit of tweets
:return: array containing the retweets from self.user_handle
"""
try:
retweets = []
for obj in tweepy.Cursor(self.api.user_timeline, screen_name=self.user_handle,
include_rts=True, tweet_mode='extended').items():
if obj.full_text.startswith('RT'):
if len(retweets) < limit:
retweets.append(obj.full_text)
else:
break
return retweets
except tweepy.TweepError:
print('Oops somethings not right, good luck figuring out what')
return []
def get_favtweets(self, limit=100):
"""
:param limit: max limit of tweets
:return: array containing the tweets favorite-ed by self.user_handle
"""
try:
favtweets = []
for obj in tweepy.Cursor(self.api.favorites, id=self.user_handle).items(limit):
if len(favtweets) < limit:
favtweets.append(obj.text)
else:
break
return favtweets
except tweepy.TweepError:
print('Oops somethings not right, good luck figuring out what')
return []
def get_location(self):
"""
:return: location of the self
"""
try:
print(self.api.get_user(screen_name=self.user_handle).location)
except tweepy.TweepError:
print('Oops somethings not right, good luck figuring out what')
return
def get_bacon(self):
"""
:return: two dicts (dem, rep) with predetermined politicians as the keys and the bacon num as the values
"""
source = self.user_handle
api = self.api
dest_dem_usrs = self._dem_usrs
dest_rep_usrs = self._rep_usrs
try:
# Get user ids from the user handles
src_user = api.get_user(source)
dem = {}
rep = {}
for party in (dest_dem_usrs, dest_rep_usrs):
if party == dest_dem_usrs:
party_flag = 'dem'
else:
party_flag = 'rep'
for destination in party:
if source == destination:
separation = 0
continue
else:
dest_user = api.get_user(destination)
src_frontier = Frontier(src_user.id, api.friends_ids
, lambda n: n.friends_count
, lambda ids: safe_lookup_users(api, ids))
dest_frontier = Frontier(dest_user.id, api.followers_ids
, lambda n: n.followers_count
, lambda ids: safe_lookup_users(api, ids))
while src_frontier.covered_all() or dest_frontier.covered_all():
# Expand the source node's frontier first
nodes = src_frontier.expand_perimeter()
# check if any one of new nodes is on the destination's perimeter
if any(map(lambda n: dest_frontier.is_on_perimeter(n), nodes)):
# print("Found!")
break
# Copy twice with a slight pain. If you have to copy thrice, abstract!
nodes = dest_frontier.expand_perimeter()
if any(map(lambda n: src_frontier.is_on_perimeter(n), nodes)):
# print("Found!")
break
m = src_frontier.perimeter.intersection(dest_frontier.perimeter).pop()
# The man in the middle!
m = src_frontier.perimeter.intersection(dest_frontier.perimeter).pop()
separation = src_frontier.get_distance(m) + dest_frontier.get_distance(m) - 1
if party_flag == 'dem':
dem[dest_user.name] = separation
else:
rep[dest_user.name] = separation
return dem, rep
except tweepy.RateLimitError:
print("""It seems we have exceeded twitter's api call limit.
Please come back after 15 minutes.""")
except tweepy.TweepError as e:
print("Something went wrong!")
print(e)
# NOTE(review): module-level demo call -- this fires live twitter API
# requests as soon as the module is imported; consider guarding it with
# `if __name__ == '__main__':`.
x = Tweet('01110011shiv')
print(x.get_bacon())
| 2.859375 | 3 |
drake_pytorch/__init__.py | DAIRLab/drake-pytorch | 9 | 12771677 | from drake_pytorch.symbolic import sym_to_pytorch, Simplifier
| 1.171875 | 1 |
dqsort/sort.py | MaestroGraph/quicksort | 0 | 12771678 | <reponame>MaestroGraph/quicksort<filename>dqsort/sort.py
import torch
import torch.nn.functional as F
from torch import nn, Tensor
import util, tensors
import numpy as np
"""
Modules to implement differentiable quicksort.
```Split``` implements the half-permutation.
```SortLayer``` chains these into quicksort.
"""
class Split(nn.Module):
    """
    A split matrix moves the elements of the input to either the top or the bottom
    half of a subsection of the output, but keeps the ordering intact otherwise.
    For depth 0, each element is moved to the top or bottom half of the output. For
    depth 1 each element is moved to the top or bottom half of its current half of
    the matrix and so on.
    """
    def __init__(self, size, depth, additional=1, sigma_scale=0.1, sigma_floor=0.0):
        # size: length of the sequence being (half-)permuted
        # depth: recursion level this split operates at
        # additional: number of extra sampled routings per instance at train time
        super().__init__()
        # (size, 2) template of row indices 0..size-1 duplicated in two columns;
        # one column is later overwritten with the computed target indices.
        template = torch.LongTensor(range(size)).unsqueeze(1).expand(size, 2)
        self.register_buffer('template', template)
        self.size = size
        self.depth = depth
        self.sigma_scale = sigma_scale  # NOTE(review): stored but not used within this class -- confirm
        self.sigma_floor = sigma_floor  # NOTE(review): stored but not used within this class -- confirm
        self.additional = additional
    def duplicates(self, tuples):
        """
        Takes a list of tuples, and for each tuple that occurs multiple times
        marks all but one of the occurences (in the mask that is returned).
        :param tuples: A size (batch, k, rank) tensor of integer tuples
        :return: A size (batch, k) mask indicating the duplicates
        """
        b, k, r = tuples.size()
        # unique = ((tuples.float() + 1) ** primes).prod(dim=2) # unique identifier for each tuple
        unique = util.unique(tuples.view(b*k, r)).squeeze().view(b, k)
        # Sort the identifiers so equal ones become adjacent, mark adjacent
        # equals, then scatter the mask back into the original order.
        sorted, sort_idx = torch.sort(unique, dim=1)
        _, unsort_idx = torch.sort(sort_idx, dim=1)
        mask = sorted[:, 1:] == sorted[:, :-1]
        # mask = mask.view(b, k - 1)
        # The first occurrence (in sorted order) is never marked a duplicate.
        zs = torch.zeros(b, 1, dtype=torch.bool, device='cuda' if tuples.is_cuda else 'cpu')
        mask = torch.cat([zs, mask], dim=1)
        return torch.gather(mask, 1, unsort_idx)
    def generate_integer_tuples(self, offset, additional=16):
        # offset: (b, s) per-element probabilities of routing to the bottom half.
        # Rounds them to a hard routing choice and, during training, samples
        # `additional` alternative routings around it.
        b, s = offset.size()
        choices = offset.round()[:, None, :].to(torch.bool)
        if additional > 0:
            sampled = util.sample_offsets(b, additional, s, self.depth, cuda=offset.is_cuda)
            # sampled = ~ choices
            choices = torch.cat([choices, sampled], dim=1)
        return self.generate(choices, offset)
    def generate(self, choices, offset):
        # Turn boolean routing choices into sparse index tuples plus the
        # probability of each choice under `offset`.
        choices = choices.detach()
        b, n, s = choices.size()
        probs = offset[:, None, :].expand(b, n, s).clone()
        # Where the choice is 0 (top half), the probability of that choice
        # is 1 - offset.
        sel = 1.0 - probs.clone()[~ choices]
        probs[~ choices] = sel
        # probs[~ choices] = 1.0 - probs[~ choices].clone()
        # prob now contains the probability (under offset) of the choices made
        probs = probs.prod(dim=2, keepdim=True).expand(b, n, s).contiguous()
        # Generate indices from the chosen offset
        indices = util.split(choices, self.depth)
        if n > 1:
            # Zero out duplicate sampled routings and renormalize the weights.
            dups = self.duplicates(indices)
            probs = probs.clone()
            probs[dups] = 0.0
            probs = probs / probs.sum(dim=1, keepdim=True)
        return indices, probs
    def forward(self, input, keys, offset, train=True, reverse=False, verbose=False):
        # Applies the (soft) half-permutation defined by `offset` to both
        # `input` (b, s, z) and `keys` (b, s).  With reverse=True the inverse
        # permutation is applied instead.
        if train:
            indices, probs = self.generate_integer_tuples(offset, self.additional)
        else:
            indices, probs = self.generate_integer_tuples(offset, 0)
        if verbose:
            print(indices[0, 0])
        indices = indices.detach()
        b, n, s = indices.size()
        template = self.template[None, None, :, :].expand(b, n, s, 2).contiguous()
        if not reverse: # normal half-permutation
            template[:, :, :, 0] = indices
        else: # reverse the permutation
            template[:, :, :, 1] = indices
        indices = template
        indices = indices.contiguous().view(b, -1, 2)
        probs = probs.contiguous().view(b, -1)
        # Multiply by the sparse (s, s) permutation matrix defined by
        # (indices, probs).
        output = tensors.batchmm(indices, probs, (s, s), input)
        keys_out = tensors.batchmm(indices, probs, (s, s), keys[:, :, None]).squeeze(-1)
        return output, keys_out
class SortLayer(nn.Module):
    """Differentiable quicksort: chains log2(size) Split layers, each
    routing elements above/below a per-bucket pivot (the bucket median).
    """
    def __init__(self, size, additional=0, sigma_scale=0.1, sigma_floor=0.0, certainty=10.0):
        # certainty: sharpness of the sigmoid that turns key-pivot
        # differences into soft routing probabilities.
        super().__init__()
        mdepth = int(np.log2(size))
        self.layers = nn.ModuleList()
        for d in range(mdepth):
            self.layers.append(Split(size, d, additional, sigma_scale, sigma_floor))
        # self.certainty = nn.Parameter(torch.tensor([certainty]))
        self.register_buffer('certainty', torch.tensor([certainty]))
        # self.offset = nn.Sequential(
        #     util.Lambda(lambda x : x[:, 0] - x[:, 1]),
        #     util.Lambda(lambda x : x * self.certainty),
        #     nn.Sigmoid()
        # )
    def forward(self, x : Tensor, keys : Tensor, target=None, train=True, verbose=False):
        # x: (b, s, z) values to sort, keys: (b, s) sort keys.
        # If `target` is given, the inverse permutations are also applied to
        # it and the intermediate stages are returned.
        xs = [x]
        targets = [target]
        offsets = []
        b, s, z = x.size()
        b, s = keys.size()
        t = target
        for d, split in enumerate(self.layers):
            # At depth d the keys form 2**d contiguous buckets.
            buckets = keys[:, :, None].view(b, 2**d, -1)
            # TODO: if you set batchsize=1, you get an error here, because the batch dim gets squeezed out in the split layer
            # compute pivots
            pivots = buckets.view(b*2**d, -1)
            pivots = median(pivots, keepdim=True)
            # Broadcast each bucket's pivot back over the bucket's elements.
            pivots = pivots.view(b, 2 ** d, -1).expand_as(buckets)
            pivots = pivots.contiguous().view(b, -1).expand_as(keys)
            # compute offsets by comparing values to pivots
            if train:
                # soft routing probability via a sharp sigmoid
                offset = keys - pivots
                offset = torch.sigmoid(offset * self.certainty)
            else:
                # hard routing at inference time
                offset = (keys > pivots).float()
            # offset = offset.round() # DEBUG
            offsets.append(offset)
            x, keys = split(x, keys, offset, train=train, verbose=verbose)
            xs.append(x)
            if verbose:
                print('o', offset[0])
                print('k', keys[0])
        if target is not None:
            # Un-sort the target through the layers in reverse order so it is
            # expressed in the coordinates of each intermediate stage.
            for split, offset in zip(self.layers[::-1], offsets[::-1]):
                t, _ = split(t, keys, offset, train=train, reverse=True)
                targets.insert(0, t)
        if target is None:
            return x, keys
        return xs, targets, keys
def median(x, keepdim=False):
    """Per-row median of a 2-D tensor.

    For rows of even length this is the mean of the two middle values of
    the sorted row; for odd length it is the single middle value.  (The
    original implementation always averaged elements s//2-1 and s//2,
    which is not the median for odd-length rows.  Callers in SortLayer
    only ever pass even bucket sizes, so even-length behaviour -- and
    therefore their results -- is unchanged.)

    :param x: tensor of size (b, s)
    :param keepdim: if True, keep the reduced dimension (result is (b, 1))
    :return: tensor of size (b,) or (b, 1)
    """
    b, s = x.size()
    srt = x.sort(dim=1)[0]
    if s % 2 == 0:
        y = srt[:, s // 2 - 1:s // 2 + 1].mean(dim=1, keepdim=keepdim)
    else:
        y = srt[:, s // 2]
        if keepdim:
            y = y[:, None]
    return y
return y
if __name__ == '__main__':
    # Smoke test: print a random matrix and its per-row medians.
    sample = torch.randn(3, 4)
    print(sample)
    print(median(sample))
sublime/Packages/Anaconda/anaconda_lib/explore_panel.py | mklewitz-kisura/dotfiles | 0 | 12771679 |
# Copyright (C) 2013 ~ 2016 - <NAME> <<EMAIL>>
# This program is Free Software see LICENSE file for details
import sublime
from ._typing import List
from Default.history_list import get_jump_history_for_view
class ExplorerPanel:
    """
    Creates a panel that can be used to explore nested options sets
    The data structure for the options is as follows:
        Options[
            {
                'title': 'Title Data'
                'details': 'Details Data',
                'location': 'File: {} Line: {} Column: {}',
                'position': 'filepath:line:col',
                'options': [
                    {
                        'title': 'Title Data'
                        'details': 'Details Data',
                        'location': 'File: {} Line: {} Column: {}',
                        'position': 'filepath:line:col',
                        'options': [
                        ]...
                    }
                ]
            }
        ]
    So we can nest as many levels as we want
    """

    def __init__(self, view: sublime.View, options: List) -> None:
        self.options = options
        self.view = view
        self.selected = []  # type: List
        # Remember the caret position so it can be restored on cancel.
        self.restore_point = view.sel()[0]

    def show(self, cluster: List, forced: bool=False) -> None:
        """Show the quick panel with the given options
        """
        if not cluster:
            cluster = self.options

        # A single result (unless forced) jumps straight to its position
        # instead of showing a one-entry panel.
        if len(cluster) == 1 and not forced:
            try:
                Jumper(self.view, cluster[0]['position']).jump()
            except KeyError:
                # No 'position' at this level; if there is exactly one
                # nested option, jump to that one instead.
                if len(cluster[0].get('options', [])) == 1 and not forced:
                    Jumper(
                        self.view, cluster[0]['options'][0]['position']).jump()
            return

        self.last_cluster = cluster
        quick_panel_options = []
        # Each panel row is [title], [title, details] or
        # [title, details, location] depending on available keys.
        for data in cluster:
            tmp = [data['title']]
            if 'details' in data:
                tmp.append(data['details'])
            if 'location' in data:
                tmp.append(data['location'])
            quick_panel_options.append(tmp)

        self.view.window().show_quick_panel(
            quick_panel_options,
            on_select=self.on_select,
            on_highlight=lambda index: self.on_select(index, True)
        )

    def on_select(self, index: int, transient: bool=False) -> None:
        """Called when an option is been made in the quick panel
        """
        if index == -1:
            # Panel cancelled: put the caret back where it was.
            self._restore_view()
            return

        cluster = self.last_cluster
        node = cluster[index]
        if transient and 'options' in node:
            # Don't preview container nodes while merely highlighting.
            return

        if 'options' in node:
            # Descend one level, prepending a synthetic "go back" entry.
            self.prev_cluster = self.last_cluster
            opts = node['options'][:]
            opts.insert(0, {'title': '<- Go Back', 'position': 'back'})
            sublime.set_timeout(lambda: self.show(opts), 0)
        else:
            if node['position'] == 'back' and not transient:
                sublime.set_timeout(lambda: self.show(self.prev_cluster), 0)
            elif node['position'] != 'back':
                Jumper(self.view, node['position']).jump(transient)

    def _restore_view(self):
        """Restore the view and location
        """
        sublime.active_window().focus_view(self.view)
        self.view.show(self.restore_point)

        if self.view.sel()[0] != self.restore_point:
            self.view.sel().clear()
            self.view.sel().add(self.restore_point)
class Jumper:
    """Jump to the specified file line and column making an indicator to toggle
    """

    def __init__(self, view: sublime.View, position: str) -> None:
        self.view = view
        self.position = position

    def jump(self, transient: bool=False) -> None:
        """Open the stored position, optionally as a transient preview
        """
        open_flags = sublime.ENCODED_POSITION
        if transient is True:
            open_flags |= sublime.TRANSIENT

        get_jump_history_for_view(self.view).push_selection(self.view)
        sublime.active_window().open_file(self.position, open_flags)
        if not transient:
            self._toggle_indicator()

    def _toggle_indicator(self) -> None:
        """Briefly flash a gutter bookmark to draw the eye to the target
        """
        path, line, column = self.position.rsplit(':', 2)
        pt = self.view.text_point(int(line) - 1, int(column))
        region_name = 'anaconda.indicator.{}.{}'.format(
            self.view.id(), line
        )

        # Three show/hide cycles: show at 0/600/1200 ms, hide 300 ms later.
        for cycle in range(3):
            show_at = 300 * cycle * 2
            hide_at = show_at + 300
            sublime.set_timeout(lambda: self.view.add_regions(
                region_name,
                [sublime.Region(pt, pt)],
                'comment',
                'bookmark',
                sublime.DRAW_EMPTY_AS_OVERWRITE
            ), show_at)
            sublime.set_timeout(
                lambda: self.view.erase_regions(region_name),
                hide_at
            )
| 3.078125 | 3 |
test/helpers.py | dboeckenhoff/tikzplotlib | 0 | 12771680 | <filename>test/helpers.py
import os
import subprocess
import tempfile
import matplotlib
import matplotlib.pyplot as plt
import tikzplotlib
def print_tree(obj, indent=""):
    """Recursively prints the tree structure of the matplotlib object.

    Text nodes are printed together with their text content; children are
    indented by two extra spaces per level.
    """
    label = type(obj).__name__
    if isinstance(obj, matplotlib.text.Text):
        print(indent, label, '("{}")'.format(obj.get_text()))
    else:
        print(indent, label)
    for child in obj.get_children():
        print_tree(child, indent + "  ")
# https://stackoverflow.com/a/845432/353337
def _unidiff_output(expected, actual):
import difflib
expected = expected.splitlines(1)
actual = actual.splitlines(1)
diff = difflib.unified_diff(expected, actual)
return "".join(diff)
def assert_equality(
    plot, filename, assert_compilation=True, flavor="latex", **extra_get_tikz_code_args
):
    """Render ``plot`` and compare the generated TikZ code with the stored
    reference file ``filename`` (relative to this directory); optionally
    also check that a standalone version of the code compiles.
    """
    plot()
    generated = tikzplotlib.get_tikz_code(
        include_disclaimer=False,
        float_format=".8g",
        flavor=flavor,
        **extra_get_tikz_code_args,
    )
    plt.close()

    reference_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
    with open(reference_path, "r", encoding="utf-8") as handle:
        expected = handle.read()
    assert expected == generated, _unidiff_output(generated, expected)

    if not assert_compilation:
        return

    # Render again, this time as a standalone document, and try to compile it.
    plot()
    standalone_code = tikzplotlib.get_tikz_code(
        include_disclaimer=False,
        standalone=True,
        flavor=flavor,
        **extra_get_tikz_code_args,
    )
    plt.close()
    assert _compile(standalone_code, flavor) is not None, standalone_code
def _compile(code, flavor):
    """Write ``code`` to a temporary .tex file and compile it to PDF.

    Returns the path of the produced PDF, or None when compilation fails
    (the compiler output is printed in that case).

    NOTE: this deliberately leaves the process cwd changed to the temp
    directory -- compare_mpl_tex relies on that side effect to locate
    pdftoppm's relative output.
    """
    # mkstemp returns an *open* file descriptor; the original discarded it
    # and leaked one fd per call.
    fd, tmp_base = tempfile.mkstemp()
    os.close(fd)
    tex_file = tmp_base + ".tex"
    with open(tex_file, "w", encoding="utf-8") as f:
        f.write(code)

    # change into the directory of the TeX file
    os.chdir(os.path.dirname(tex_file))

    # compile the output to pdf
    cmdline = dict(
        latex=["pdflatex", "--interaction=nonstopmode"],
        context=["context", "--nonstopmode"],
    )[flavor]
    try:
        subprocess.check_output(cmdline + [tex_file], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(f"{cmdline[0]} output:")
        print("=" * 70)
        print(e.output.decode("utf-8"))
        print("=" * 70)
        output_pdf = None
    else:
        output_pdf = tmp_base + ".pdf"
    return output_pdf
def compare_mpl_tex(plot, flavor="latex"):
    """Render ``plot`` both via matplotlib (test-0.png) and via
    tikzplotlib + LaTeX + pdftoppm (test-1.png) for visual comparison.

    Depends on _compile() leaving the process cwd changed to the temp
    directory: pdftoppm writes its output relative to that cwd.
    """
    plot()
    code = tikzplotlib.get_tikz_code(standalone=True)
    # Remember the original cwd before _compile chdirs away from it.
    directory = os.getcwd()
    filename = "test-0.png"
    plt.savefig(filename)
    plt.close()

    pdf_file = _compile(code, flavor)
    pdf_dirname = os.path.dirname(pdf_file)

    # Convert PDF to PNG.
    subprocess.check_output(
        ["pdftoppm", "-r", "1000", "-png", pdf_file, "test"], stderr=subprocess.STDOUT
    )
    png_path = os.path.join(pdf_dirname, "test-1.png")

    os.rename(png_path, os.path.join(directory, "test-1.png"))
| 2.53125 | 3 |
tests/interface_test.py | welfare-state-analytics/pyriksprot | 0 | 12771681 | from __future__ import annotations
import glob
import os
import uuid
from typing import Callable
import pandas as pd
import pytest
from black import itertools
from pyriksprot import interface, to_speech
from pyriksprot.corpus import tagged as tagged_corpus
from .utility import TAGGED_SOURCE_PATTERN, UTTERANCES_DICTS, create_utterances
# pylint: disable=redefined-outer-name
jj = os.path.join
@pytest.fixture(scope='module')
def utterances() -> list[interface.Utterance]:
    """Sample utterances, built once and shared by all tests in this module."""
    return create_utterances()
def test_utterance_text():
    """The text property joins the utterance's paragraphs with newlines."""
    utterance = interface.Utterance(u_id="A", speaker_hash="x", who="x", paragraphs=["X", "Y", "C"])
    assert utterance.text == "X\nY\nC"
def test_utterance_checksumtext():
    """checksum() is deterministic for a fixed utterance."""
    utterance = interface.Utterance(u_id="A", speaker_hash="x", who="x", paragraphs=["X", "Y", "C"])
    assert utterance.checksum() == '6060d006e0494206'
def test_utterances_to_dict():
    """Consecutive utterances by the same speaker cluster together; a change
    of speaker starts a new cluster.

    NOTE(review): the names of this test and test_utterances_who_sequences
    below appear to be swapped -- this one exercises MergeByWhoSequence,
    the other exercises UtteranceHelper.to_dict.
    """
    # Empty / None input yields no clusters.
    who_sequences: list[list[interface.Utterance]] = to_speech.MergeByWhoSequence().cluster(None)
    assert who_sequences == []
    who_sequences: list[list[interface.Utterance]] = to_speech.MergeByWhoSequence().cluster([])
    assert who_sequences == []
    utterances: list[interface.Utterance] = [
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xa1", who='A'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xa1", who='A'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xb1", who='B'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xb1", who='B'),
        interface.Utterance(u_id=f'{uuid.uuid4()}', speaker_hash="xa2", who='A'),
    ]
    # A,A | B,B | A  ->  three clusters of sizes 2, 2, 1.
    who_sequences: list[list[interface.Utterance]] = to_speech.MergeByWhoSequence().cluster(utterances)
    assert len(who_sequences) == 3
    assert len(who_sequences[0]) == 2
    assert len(who_sequences[1]) == 2
    assert len(who_sequences[2]) == 1
    assert set(x.who for x in who_sequences[0]) == {'A'}
    assert set(x.who for x in who_sequences[1]) == {'B'}
    assert set(x.who for x in who_sequences[2]) == {'A'}
def test_utterances_who_sequences(utterances: list[interface.Utterance]):
    """UtteranceHelper.to_dict yields the expected list of dicts.

    NOTE(review): the name appears swapped with test_utterances_to_dict above.
    """
    data = interface.UtteranceHelper.to_dict(utterances)
    assert data == UTTERANCES_DICTS
def test_utterances_to_csv(utterances: list[interface.Utterance]):
    """CSV serialisation round-trips without losing any attribute."""
    csv_text: str = interface.UtteranceHelper.to_csv(utterances)
    restored = interface.UtteranceHelper.from_csv(csv_text)
    assert [u.__dict__ for u in utterances] == [u.__dict__ for u in restored]
def test_utterances_to_json(utterances: list[interface.Utterance]):
    """JSON serialisation round-trips without losing any attribute."""
    json_text: str = interface.UtteranceHelper.to_json(utterances)
    restored = interface.UtteranceHelper.from_json(json_text)
    assert [u.__dict__ for u in utterances] == [u.__dict__ for u in restored]
def test_utterances_to_pandas(utterances: list[interface.Utterance]):
    """The dataframe form carries the same records as the dict form."""
    frame: pd.DataFrame = interface.UtteranceHelper.to_dataframe(utterances)
    records = frame.reset_index().to_dict(orient='records')
    assert records == UTTERANCES_DICTS
def test_protocol_create(utterances: list[interface.Utterance]):
    """A Protocol built from five utterances exposes its metadata, length,
    checksum and concatenated text.  (The original asserted name/date twice
    verbatim; the duplicates are removed.)"""
    protocol: interface.Protocol = interface.Protocol(
        date="1958", name="prot-1958-fake", utterances=utterances, speaker_notes={}
    )
    assert protocol is not None
    assert len(protocol.utterances) == 5
    assert len(protocol) == 5
    assert protocol.name == "prot-1958-fake"
    assert protocol.date == "1958"
    assert protocol.has_text, 'has text'
    assert protocol.checksum() == '7e5112f9db8c8462d89fac08714ce15b432d7733', 'checksum'
    assert protocol.text == '\n'.join(u.text for u in utterances)
def test_protocol_preprocess():
    """Applying a preprocess function rewrites the text of every utterance."""
    protocol: interface.Protocol = interface.Protocol(
        date="1950", name="prot-1958-fake", utterances=create_utterances(), speaker_notes={}
    )

    def constant_text(t: str) -> str:
        return 'APA'

    protocol.preprocess(preprocess=constant_text)
    assert protocol.text == '\n'.join(['APA'] * 6)
def test_protocols_to_items():
    """Smoke test: tagged protocols can be loaded and segmented by speaker.

    NOTE(review): `itertools` here comes from `from black import itertools`
    at the top of this file -- almost certainly meant to be the stdlib
    itertools; verify.  Also note the chain below is never consumed, so the
    lazy to_segments generators are not actually executed -- only the
    loading step is exercised.
    """
    filenames: list[str] = glob.glob(TAGGED_SOURCE_PATTERN, recursive=True)
    protocols: list[interface.Protocol] = [p for p in tagged_corpus.load_protocols(source=filenames)]
    _ = itertools.chain(
        p.to_segments(content_type=interface.ContentType.Text, segment_level=interface.SegmentLevel.Who)
        for p in protocols
    )
| 2.125 | 2 |
S4/S4 Decompiler/Old Libraries/uncompyle6/parsers/parse38.py | NeonOcean/Environment | 1 | 12771682 | <reponame>NeonOcean/Environment<gh_stars>1-10
# Copyright (c) 2017-2019 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
spark grammar differences over Python 3.7 for Python 3.8
"""
from __future__ import print_function
from uncompyle6.parser import PythonParserSingle
from spark_parser import DEFAULT_DEBUG as PARSER_DEFAULT_DEBUG
from uncompyle6.parsers.parse37 import Python37Parser
class Python38Parser(Python37Parser):
    """Grammar for decompiling Python 3.8 bytecode, expressed as deltas
    over the Python 3.7 grammar.

    IMPORTANT: spark's GenericParser harvests grammar rules from the
    docstrings of ``p_*`` methods at runtime -- those docstrings are data,
    not documentation, and must not be edited as prose.
    """

    def p_38misc(self, args):
        # The docstring below IS the grammar (parsed by spark); do not
        # reformat or "improve" it as documentation.
        """
        stmt     ::= async_for_stmt38
        stmt     ::= async_forelse_stmt38
        stmt     ::= for38
        stmt     ::= forelsestmt38
        stmt     ::= forelselaststmt38
        stmt     ::= forelselaststmtl38
        stmt     ::= tryfinally38
        stmt     ::= try_elsestmtl38
        stmt     ::= try_except_ret38
        stmt     ::= try_except38
        stmt     ::= whilestmt38
        stmt     ::= whileTruestmt38
        stmt     ::= call
        # FIXME: this should be restricted to being inside a try block
        stmt     ::= except_ret38

        # FIXME: this should be added only when seeing GET_AITER or YIELD_FROM
        async_for_stmt38     ::= expr
                                 GET_AITER
                                 SETUP_FINALLY
                                 GET_ANEXT
                                 LOAD_CONST
                                 YIELD_FROM
                                 POP_BLOCK
                                 store for_block
                                 COME_FROM_FINALLY
                                 END_ASYNC_FOR

        # FIXME: come froms after the else_suite or END_ASYNC_FOR distinguish which of
        # for / forelse is used. Add come froms and check of add up control-flow detection phase.
        async_forelse_stmt38 ::= expr
                                 GET_AITER
                                 SETUP_FINALLY
                                 GET_ANEXT
                                 LOAD_CONST
                                 YIELD_FROM
                                 POP_BLOCK
                                 store for_block
                                 COME_FROM_FINALLY
                                 END_ASYNC_FOR
                                 else_suite

        async_with_stmt     ::= expr BEFORE_ASYNC_WITH GET_AWAITABLE LOAD_CONST YIELD_FROM
                                SETUP_ASYNC_WITH POP_TOP
                                suite_stmts
                                POP_TOP POP_BLOCK
                                BEGIN_FINALLY COME_FROM_ASYNC_WITH
                                WITH_CLEANUP_START
                                GET_AWAITABLE LOAD_CONST YIELD_FROM
                                WITH_CLEANUP_FINISH END_FINALLY

        async_with_as_stmt  ::= expr BEFORE_ASYNC_WITH GET_AWAITABLE LOAD_CONST YIELD_FROM
                                SETUP_ASYNC_WITH store
                                suite_stmts
                                POP_TOP POP_BLOCK
                                BEGIN_FINALLY COME_FROM_ASYNC_WITH
                                WITH_CLEANUP_START
                                GET_AWAITABLE LOAD_CONST YIELD_FROM
                                WITH_CLEANUP_FINISH END_FINALLY

        return              ::= ret_expr ROT_TWO POP_TOP RETURN_VALUE

        for38               ::= expr get_iter store for_block JUMP_BACK
        for38               ::= expr for_iter store for_block JUMP_BACK
        for38               ::= expr for_iter store for_block JUMP_BACK POP_BLOCK

        forelsestmt38       ::= expr for_iter store for_block POP_BLOCK else_suite
        forelselaststmt38   ::= expr for_iter store for_block POP_BLOCK else_suitec
        forelselaststmtl38  ::= expr for_iter store for_block POP_BLOCK else_suitel

        whilestmt38         ::= testexpr l_stmts_opt COME_FROM JUMP_BACK POP_BLOCK
        whilestmt38         ::= testexpr l_stmts_opt JUMP_BACK POP_BLOCK
        whilestmt38         ::= testexpr returns               POP_BLOCK
        whilestmt38         ::= testexpr l_stmts     JUMP_BACK

        # while1elsestmt   ::= l_stmts JUMP_BACK
        whileTruestmt       ::= l_stmts              JUMP_BACK POP_BLOCK
        while1stmt          ::= l_stmts COME_FROM_LOOP
        while1stmt          ::= l_stmts COME_FROM JUMP_BACK COME_FROM_LOOP
        whileTruestmt38     ::= l_stmts JUMP_BACK

        for_block           ::= l_stmts_opt _come_from_loops JUMP_BACK

        except_cond1        ::= DUP_TOP expr COMPARE_OP jmp_false
                                POP_TOP POP_TOP POP_TOP
                                POP_EXCEPT

        try_elsestmtl38     ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
                                except_handler38 COME_FROM
                                else_suitel opt_come_from_except
        try_except          ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
                                except_handler38

        try_except38        ::= SETUP_FINALLY POP_BLOCK POP_TOP suite_stmts_opt
                                except_handler38a

        try_except_ret38    ::= SETUP_FINALLY expr POP_BLOCK
                                RETURN_VALUE except_ret38a

        # Note: there is a suite_stmts_opt which seems
        # to be bookkeeping which is not expressed in source code
        except_ret38        ::= SETUP_FINALLY expr ROT_FOUR POP_BLOCK POP_EXCEPT
                                CALL_FINALLY RETURN_VALUE COME_FROM
                                COME_FROM_FINALLY
                                suite_stmts_opt END_FINALLY
        except_ret38a       ::= COME_FROM_FINALLY POP_TOP POP_TOP POP_TOP
                                expr ROT_FOUR
                                POP_EXCEPT RETURN_VALUE END_FINALLY

        except_handler38    ::= JUMP_FORWARD COME_FROM_FINALLY
                                except_stmts END_FINALLY opt_come_from_except
        except_handler38a   ::= COME_FROM_FINALLY POP_TOP POP_TOP POP_TOP
                                POP_EXCEPT POP_TOP stmts END_FINALLY

        tryfinallystmt      ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
                                BEGIN_FINALLY COME_FROM_FINALLY suite_stmts_opt
                                END_FINALLY


        tryfinally38        ::= SETUP_FINALLY POP_BLOCK CALL_FINALLY
                                returns
                                COME_FROM_FINALLY END_FINALLY suite_stmts
        tryfinally38        ::= SETUP_FINALLY POP_BLOCK CALL_FINALLY
                                returns
                                COME_FROM_FINALLY POP_FINALLY returns
                                END_FINALLY
        tryfinally_return_stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
                                   BEGIN_FINALLY COME_FROM_FINALLY
                                   POP_FINALLY suite_stmts_opt END_FINALLY

        """

    def __init__(self, debug_parser=PARSER_DEFAULT_DEBUG):
        super(Python38Parser, self).__init__(debug_parser)
        self.customized = {}

    def customize_grammar_rules(self, tokens, customize):
        # The triple-quoted argument below is grammar data: these 3.7-era
        # rules do not apply to 3.8 bytecode and are removed before the
        # inherited customization runs.  It is a string literal, not a
        # docstring -- do not edit it as documentation.
        self.remove_rules("""
           stmt               ::= async_for_stmt37
           stmt               ::= for
           stmt               ::= forelsestmt
           stmt               ::= try_except36
           async_for_stmt     ::= SETUP_LOOP expr
                                  GET_AITER
                                  SETUP_EXCEPT GET_ANEXT LOAD_CONST
                                  YIELD_FROM
                                  store
                                  POP_BLOCK JUMP_FORWARD COME_FROM_EXCEPT DUP_TOP
                                  LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_TRUE
                                  END_FINALLY COME_FROM
                                  for_block
                                  COME_FROM
                                  POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_TOP POP_BLOCK
                                  COME_FROM_LOOP
           async_for_stmt37   ::= SETUP_LOOP expr
                                  GET_AITER
                                  SETUP_EXCEPT GET_ANEXT
                                  LOAD_CONST YIELD_FROM
                                  store
                                  POP_BLOCK JUMP_BACK COME_FROM_EXCEPT DUP_TOP
                                  LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_TRUE
                                  END_FINALLY for_block COME_FROM
                                  POP_TOP POP_TOP POP_TOP POP_EXCEPT
                                  POP_TOP POP_BLOCK
                                  COME_FROM_LOOP

           async_forelse_stmt ::= SETUP_LOOP expr
                                  GET_AITER
                                  SETUP_EXCEPT GET_ANEXT LOAD_CONST
                                  YIELD_FROM
                                  store
                                  POP_BLOCK JUMP_FORWARD COME_FROM_EXCEPT DUP_TOP
                                  LOAD_GLOBAL COMPARE_OP POP_JUMP_IF_TRUE
                                  END_FINALLY COME_FROM
                                  for_block
                                  COME_FROM
                                  POP_TOP POP_TOP POP_TOP POP_EXCEPT POP_TOP POP_BLOCK
                                  else_suite COME_FROM_LOOP

           for                ::= SETUP_LOOP expr for_iter store for_block POP_BLOCK
           for                ::= SETUP_LOOP expr for_iter store for_block POP_BLOCK NOP

           for_block          ::= l_stmts_opt COME_FROM_LOOP JUMP_BACK
           forelsestmt        ::= SETUP_LOOP expr for_iter store for_block POP_BLOCK else_suite
           forelselaststmt    ::= SETUP_LOOP expr for_iter store for_block POP_BLOCK else_suitec
           forelselaststmtl   ::= SETUP_LOOP expr for_iter store for_block POP_BLOCK else_suitel

           tryelsestmtl3      ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
                                  except_handler COME_FROM else_suitel
                                  opt_come_from_except
           try_except         ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
                                  except_handler opt_come_from_except
           tryfinallystmt     ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
                                  LOAD_CONST COME_FROM_FINALLY suite_stmts_opt
                                  END_FINALLY
           tryfinally36       ::= SETUP_FINALLY returns
                                  COME_FROM_FINALLY suite_stmts_opt END_FINALLY
           tryfinally_return_stmt ::= SETUP_FINALLY suite_stmts_opt POP_BLOCK
                                      LOAD_CONST COME_FROM_FINALLY
        """)
        # Skip Python37Parser's own customization; go straight to its base.
        super(Python37Parser, self).customize_grammar_rules(tokens, customize)
        # Extra validation hooks run by reduce_is_invalid below.
        self.check_reduce['ifstmt'] = 'tokens'
        self.check_reduce['whileTruestmt38'] = 'tokens'

    def reduce_is_invalid(self, rule, ast, tokens, first, last):
        # Reject reductions that the base parser accepts but that cannot be
        # valid in 3.8 control flow.
        invalid = super(Python38Parser,
                        self).reduce_is_invalid(rule, ast,
                                                tokens, first, last)
        if invalid:
            return invalid
        if rule[0] == 'ifstmt':
            # Make sure jumps don't extend beyond the end of the if statement.
            l = last
            if l == len(tokens):
                l -= 1
            if isinstance(tokens[l].offset, str):
                last_offset = int(tokens[l].offset.split('_')[0], 10)
            else:
                last_offset = tokens[l].offset
            for i in range(first, l):
                t = tokens[i]
                if t.kind == 'POP_JUMP_IF_FALSE':
                    if t.attr > last_offset:
                        return True
                pass
            pass
            pass
        elif rule[0] == 'whileTruestmt38':
            # The closing JUMP_BACK must target the start of this statement.
            t = tokens[last-1]
            if t.kind == 'JUMP_BACK':
                return t.attr != tokens[first].offset
            pass

        return False
class Python38ParserSingle(Python38Parser, PythonParserSingle):
    """Python 3.8 grammar combined with the single-statement parser mixin."""
    pass
if __name__ == '__main__':
    # Check grammar
    p = Python38Parser()
    p.check_grammar()
    from uncompyle6 import PYTHON_VERSION, IS_PYPY
    if PYTHON_VERSION == 3.8:
        lhs, rhs, tokens, right_recursive = p.check_sets()
        from uncompyle6.scanner import get_scanner
        s = get_scanner(PYTHON_VERSION, IS_PYPY)
        # Every token the grammar mentions should correspond to a real
        # opcode or one of the scanner's pseudo-tokens listed here.
        opcode_set = set(s.opc.opname).union(set(
          """JUMP_BACK CONTINUE RETURN_END_IF COME_FROM
             LOAD_GENEXPR LOAD_ASSERT LOAD_SETCOMP LOAD_DICTCOMP LOAD_CLASSNAME
             LAMBDA_MARKER RETURN_LAST
          """.split()))
        remain_tokens = set(tokens) - opcode_set
        import re
        # Strip numeric suffixes (e.g. COME_FROM_2) and _CONT variants
        # before re-checking against the opcode set.
        remain_tokens = set([re.sub(r'_\d+$', '', t) for t in remain_tokens])
        remain_tokens = set([re.sub('_CONT$', '', t) for t in remain_tokens])
        remain_tokens = set(remain_tokens) - opcode_set
        # Anything printed here is a grammar token with no matching opcode.
        print(remain_tokens)
        # print(sorted(p.rule2name.items()))
Calibration/IsolatedParticles/test/python/proto_runIsolatedTracksNxNNzsData_cfg.py | ckamtsikis/cmssw | 852 | 12771683 | import FWCore.ParameterSet.Config as cms
process = cms.Process("L1SKIM")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100000
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
####################### configure pool source #############################
process.source = cms.Source("PoolSource",
fileNames =cms.untracked.vstring(
'/store/data/Run2010A/MinimumBias/RECO/Apr21ReReco-v1/0000/08275F4A-5270-E011-9DC3-003048635E02.root'
),
skipEvents = cms.untracked.uint32(0)
)
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
##################### digi-2-raw plus L1 emulation #########################
process.load("Configuration.StandardSequences.Services_cff")
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
#################### Conditions and L1 menu ################################
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag=autoCond['run1_data']
############ Skim the events according to the L1 seeds ####################
#select on HLT_HcalNZS_8E29 trigger
import HLTrigger.HLTfilters.hltLevel1GTSeed_cfi
process.skimL1Seeds = HLTrigger.HLTfilters.hltLevel1GTSeed_cfi.hltLevel1GTSeed.clone()
process.skimL1Seeds.L1GtReadoutRecordTag = cms.InputTag("gtDigis")
process.skimL1Seeds.L1GtObjectMapTag = cms.InputTag("hltL1GtObjectMap")
process.skimL1Seeds.L1CollectionsTag = cms.InputTag("l1extraParticles")
process.skimL1Seeds.L1MuonCollectionTag = cms.InputTag("l1extraParticles")
process.skimL1Seeds.L1SeedsLogicalExpression = "L1_SingleEG2 OR L1_SingleEG5 OR L1_SingleEG8 OR L1_SingleEG10 OR L1_SingleEG12 OR L1_SingleEG15 OR L1_SingleEG20 OR L1_SingleIsoEG5 OR L1_SingleIsoEG8 OR L1_SingleIsoEG10 OR L1_SingleIsoEG12 OR L1_SingleIsoEG15 OR L1_SingleJet6U OR L1_SingleJet10U OR L1_SingleJet20U OR L1_SingleJet30U OR L1_SingleJet40U OR L1_SingleJet50U OR L1_SingleJet60U OR L1_SingleTauJet10U OR L1_SingleTauJet20U OR L1_SingleTauJet30U OR L1_SingleTauJet50U OR L1_SingleMuOpen OR L1_SingleMu0 OR L1_SingleMu3 OR L1_SingleMu5 OR L1_SingleMu7 OR L1_SingleMu10 OR L1_SingleMu14 OR L1_SingleMu20 OR L1_ZeroBias"
# select on HLT_HcalPhiSym trigger
process.load("HLTrigger.HLTfilters.hltLevel1Activity_cfi")
process.hltLevel1Activity.L1GtReadoutRecordTag = cms.InputTag('gtDigis')
######################## Configure Analyzer ###############################
process.load("RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi")
process.load("Calibration.IsolatedParticles.isolatedTracksNxN_cfi")
process.isolatedTracksNxN.Verbosity = cms.untracked.int32( 0 )
process.isolatedTracksNxN.HBHERecHitSource = cms.InputTag("hbhereco")
process.isolatedTracksNxN.L1TriggerAlgoInfo = True
#process.isolatedTracksNxN.DebugL1Info = True
process.isolatedTracksNxN_NZS = process.isolatedTracksNxN.clone(
Verbosity = cms.untracked.int32( 0 ),
HBHERecHitSource = cms.InputTag("hbherecoMB"),
L1TriggerAlgoInfo = True
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string('IsolatedTracksNxNData.root')
)
# configure Technical Bits to ensure collision and remove BeamHalo
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND NOT (36 OR 37 OR 38 OR 39)')
# filter out scrapping events
process.noScraping= cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False), ## Or 'True' to get some per-event info
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
# select on primary vertex
process.primaryVertexFilter = cms.EDFilter("GoodVertexFilter",
vertexCollection = cms.InputTag('offlinePrimaryVertices'),
minimumNDOF = cms.uint32(4) ,
maxAbsZ = cms.double(25.0),
maxd0 = cms.double(5.0)
)
#=============================================================================
# define an EndPath to analyze all other path results
process.hltTrigReport = cms.EDAnalyzer( 'HLTrigReport',
HLTriggerResults = cms.InputTag( 'TriggerResults','','HLT')
)
process.load("L1Trigger.GlobalTriggerAnalyzer.l1GtTrigReport_cfi")
process.l1GtTrigReport.L1GtRecordInputTag = 'gtDigis'
process.l1GtTrigReport.PrintVerbosity = 1
#=============================================================================
#### by Benedikt
process.p1 = cms.Path(process.primaryVertexFilter * process.hltLevel1GTSeed * process.noScraping * process.skimL1Seeds *process.isolatedTracksNxN * process.isolatedTracksNxN_NZS)
process.e = cms.EndPath(process.l1GtTrigReport + process.hltTrigReport)
| 1.257813 | 1 |
build/plugins/lib/nots/package_manager/pnpm/tests/workspace.py | jochenater/catboost | 6,989 | 12771684 | <filename>build/plugins/lib/nots/package_manager/pnpm/tests/workspace.py
from build.plugins.lib.nots.package_manager.base import PackageJson
from build.plugins.lib.nots.package_manager.pnpm.workspace import PnpmWorkspace
def test_workspace_get_paths():
    """Workspace package globs resolve to absolute, workspace-relative paths."""
    workspace = PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")
    workspace.packages = {".", "../bar", "../../another/baz"}
    expected = [
        "/another/baz",
        "/packages/bar",
        "/packages/foo",
    ]
    assert sorted(workspace.get_paths()) == expected
def test_workspace_set_from_package_json():
    """All four dependency sections contribute `workspace:` paths to the workspace."""
    workspace = PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")
    package_json = PackageJson(path="/packages/foo/package.json")
    package_json.data = {
        "dependencies": {"@a/bar": "workspace:../bar"},
        "devDependencies": {"@a/baz": "workspace:../../another/baz"},
        "peerDependencies": {"@a/qux": "workspace:../../another/qux"},
        "optionalDependencies": {"@a/quux": "workspace:../../another/quux"},
    }
    workspace.set_from_package_json(package_json)
    expected = [
        "/another/baz",
        "/another/quux",
        "/another/qux",
        "/packages/bar",
        "/packages/foo",
    ]
    assert sorted(workspace.get_paths()) == expected
def test_workspace_merge():
    """Merging pulls the other workspace's packages in, re-rooted at the receiver."""
    primary = PnpmWorkspace(path="/packages/foo/pnpm-workspace.yaml")
    primary.packages = {".", "../bar", "../../another/baz"}
    secondary = PnpmWorkspace(path="/another/baz/pnpm-workspace.yaml")
    secondary.packages = {".", "../qux"}
    primary.merge(secondary)
    expected = [
        "/another/baz",
        "/another/qux",
        "/packages/bar",
        "/packages/foo",
    ]
    assert sorted(primary.get_paths()) == expected
| 2.109375 | 2 |
python/src/yalix/globals.py | rm-hull/yalix | 4 | 12771685 | <reponame>rm-hull/yalix
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Some predefined functions injected into an environment
"""
import functools
import operator
import random
import math
import time
from yalix.py3_compat import long_t
from yalix.utils import log_progress
from yalix.parser import scheme_parser
from yalix.environment import Env
from yalix.exceptions import EvaluationError
from yalix.interpreter import Atom, InterOp, Lambda, List, \
Realize, Symbol, SpecialForm, Promise, __special_forms__
__core_libraries__ = ['core', 'hof', 'num', 'macros', 'repr', 'test']
def create_initial_env():
    """Build a fresh top-level environment: special forms, Python builtins,
    then each core lisp library in declaration order."""
    env = Env()
    with log_progress("Creating initial environment"):
        bootstrap_special_forms(env)
        bootstrap_python_functions(env)
        for library in __core_libraries__:
            library_path = "../../core/{0}.ylx".format(library)
            with log_progress("Loading library: " + library):
                bootstrap_lisp_functions(env, library_path)
    return env
def gensym(prefix='G__'):
    """Create a fresh, globally unique symbol (default prefix ``G__``)."""
    unique_id = Env.next_id()
    return Symbol('{0}{1}'.format(prefix, unique_id))
def interop(fun, arity, variadic=False):
    """ Helper to create a lisp function from a python function """
    # One fresh gensym per formal parameter avoids variable capture in the body.
    bind_variables = [gensym() for _ in range(arity)]
    if variadic:
        # Insert the variadic marker at the last-but one position
        formals = list(bind_variables)
        formals.insert(-1, Lambda.VARIADIC_MARKER)
        # The rest-argument arrives as a lazy sequence; Realize forces it
        # before it is handed to the Python function.
        bind_variables[-1] = Realize(bind_variables[-1])
    else:
        formals = bind_variables
    return Lambda(List(*formals), InterOp(fun, *bind_variables))
def doc(value):
    """Print the attached docstring of *value*, if it has one."""
    docstring = getattr(value, '__docstring__', None)
    if docstring:
        print('-----------------')
        print(docstring)
def source(value):
    """Pretty-print the syntax-highlighted source of *value*, if available."""
    from yalix.utils import highlight_syntax
    from yalix.source_view import source_view
    src = source_view(value)
    if src:
        print('-----------------')
        # Reuse the already-computed view instead of calling source_view twice.
        print(highlight_syntax(src))
def print_(value):
    """Print the concatenated string form of *value* (delegates to str_)."""
    print(str_(value))
def str_(args=None):
    """Concatenate the string forms of *args*; nil (None) items render as ''.

    Always returns a str. With no arguments, or an empty sequence, returns ''.
    """
    if args is None:
        return ''
    # str.join is linear and, unlike the previous seedless reduce, always
    # yields a string: reduce() returned a single-element input unchanged
    # (e.g. str_([5]) -> 5, str_([None]) -> None) and raised on [].
    return ''.join('' if x is None else str(x) for x in args)
def format_(format_spec, args=None):
    """Apply ``str.format`` to *format_spec* with *args* as positional values."""
    positional = [] if args is None else args
    return format_spec.format(*positional)
def error(msg):
    """Abort evaluation by raising an EvaluationError carrying *msg*."""
    raise EvaluationError(None, msg)
def atom_QUESTION(value):
    """ Checks if the supplied value is an atom """
    if value is None:
        return True
    # Exact type match on purpose: subclasses are not considered atoms.
    return type(value) in (str, int, long_t, float, bool, Symbol)
def pair_QUESTION(value):
    """Return True when *value* is a cons pair (represented as a tuple)."""
    return isinstance(value, tuple)
def promise_QUESTION(value):
    """Return True when *value* is a Promise (realized or pending)."""
    return isinstance(value, Promise)
def realized_QUESTION(value):
    """Return True when *value* is a Promise whose result has been forced."""
    return promise_QUESTION(value) and value.realized
def read_string(value):
    """Parse *value* and return its first (single) form as an AST node."""
    return scheme_parser().parseString(value, parseAll=True).asList()[0]
def car(value):
    """Return the head of a cons cell; the head of nil is nil.

    Raises EvaluationError for anything that is neither nil nor a pair.
    """
    if isinstance(value, tuple):
        return value[0]
    if value is None:
        return None
    raise EvaluationError(value, "Cannot car on non-cons cell: '{0}'", value)
def cdr(value):
    """Return the tail of a cons cell; the tail of nil is nil.

    Raises EvaluationError for anything that is neither nil nor a pair.
    """
    if isinstance(value, tuple):
        return value[1]
    if value is None:
        return None
    raise EvaluationError(value, "Cannot cdr on non-cons cell: '{0}'", value)
def bootstrap_lisp_functions(env, from_file):
    """Parse *from_file* and evaluate every top-level form into *env*."""
    for ast in scheme_parser().parseFile(from_file, parseAll=True).asList():
        # TODO: brand AST nodes with filename
        ast.eval(env)
class EvalWrapper(object):
    """Dict-like facade over an Env that evaluates primitives on assignment."""
    def __init__(self, env):
        self.env = env
    def __setitem__(self, name, primitive):
        # Store the *evaluated* primitive, not the raw AST node.
        self.env[name] = primitive.eval(self.env)
def bootstrap_special_forms(env):
    """Register every known special form into *env*."""
    wrapper = EvalWrapper(env)
    for name in __special_forms__:
        wrapper[name] = SpecialForm(name)
def bootstrap_python_functions(env):
    """Register the Python-implemented primitives and constants into *env*.

    Each entry wraps a Python callable via interop() with its true arity.
    """
    env = EvalWrapper(env)
    env['*debug*'] = Atom(False)
    env['nil'] = Atom(None)
    env['nil?'] = interop(lambda x: x is None, 1)
    env['atom?'] = interop(atom_QUESTION, 1)
    env['pair?'] = interop(pair_QUESTION, 1)
    env['promise?'] = interop(promise_QUESTION, 1)
    env['realized?'] = interop(realized_QUESTION, 1)
    env['cons'] = interop(lambda x, y: (x, y), 2)
    env['car'] = interop(car, 1)
    env['cdr'] = interop(cdr, 1)
    env['gensym'] = interop(gensym, 0)
    env['symbol'] = interop(lambda x: Symbol(x), 1)
    env['symbol?'] = interop(lambda x: isinstance(x, Symbol), 1)
    env['interop'] = interop(interop, 2)
    env['doc'] = interop(doc, 1)
    env['source'] = interop(source, 1)
    env['print'] = interop(print_, 1, variadic=True)
    env['format'] = interop(format_, 2, variadic=True)
    env['str'] = interop(str_, 1, variadic=True)
    env['read-string'] = interop(read_string, 1)  # Read just one symbol
    env['error'] = interop(error, 1)
    env['epoch-time'] = interop(time.time, 0)

    # Basic Arithmetic Functions
    env['add'] = interop(operator.add, 2)
    env['sub'] = interop(operator.sub, 2)
    env['mul'] = interop(operator.mul, 2)
    env['div'] = interop(operator.truediv, 2)
    env['quot'] = interop(operator.floordiv, 2)
    env['negate'] = interop(operator.neg, 1)

    # String / Sequence Functions
    env['contains?'] = interop(operator.contains, 2)

    # Bitwise Ops
    env['bitwise-and'] = interop(operator.and_, 2)
    env['bitwise-xor'] = interop(operator.xor, 2)
    # operator.invert is unary; it was registered with arity 2, which made
    # every call fail. (A duplicate 'bitwise-and' registration was also removed.)
    env['bitwise-invert'] = interop(operator.invert, 1)
    env['bitwise-or'] = interop(operator.or_, 2)
    env['bitwise-left-shift'] = interop(operator.lshift, 2)
    env['bitwise-right-shift'] = interop(operator.rshift, 2)
    env['not'] = interop(operator.not_, 1)

    # Comparison & Ordering
    env['not='] = interop(operator.ne, 2)
    env['<'] = interop(operator.lt, 2)
    env['<='] = interop(operator.le, 2)
    env['='] = interop(operator.eq, 2)
    env['>='] = interop(operator.ge, 2)
    env['>'] = interop(operator.gt, 2)
    env['random'] = interop(random.random, 0)

    # Number theoretic Functions
    env['ceil'] = interop(math.ceil, 1)
    env['floor'] = interop(math.floor, 1)
    env['mod'] = interop(operator.mod, 2)
    env['trunc'] = interop(math.trunc, 1)

    # Power & Logarithmic Functions
    env['exp'] = interop(math.exp, 1)
    env['log'] = interop(math.log, 2)
    env['log10'] = interop(math.log10, 1)
    env['pow'] = interop(math.pow, 2)
    env['sqrt'] = interop(math.sqrt, 1)

    # Trigonometric Functions
    env['acos'] = interop(math.acos, 1)
    env['asin'] = interop(math.asin, 1)
    env['atan'] = interop(math.atan, 1)
    # math.atan2 takes two arguments (y, x); arity 1 would always fail.
    env['atan2'] = interop(math.atan2, 2)
    env['cos'] = interop(math.cos, 1)
    env['hypot'] = interop(math.hypot, 2)
    env['sin'] = interop(math.sin, 1)
    env['tan'] = interop(math.tan, 1)

    # Angular Conversion
    env['degrees'] = interop(math.degrees, 1)
    env['radians'] = interop(math.radians, 1)

    # Hyperbolic Functions
    env['acosh'] = interop(math.acosh, 1)
    env['asinh'] = interop(math.asinh, 1)
    env['atanh'] = interop(math.atanh, 1)
    env['cosh'] = interop(math.cosh, 1)
    env['sinh'] = interop(math.sinh, 1)
    env['tanh'] = interop(math.tanh, 1)

    # Constants
    env['math/pi'] = Atom(math.pi)
    env['math/e'] = Atom(math.e)
| 2.078125 | 2 |
python/pygame/basic/collison-detection.py | batturo/lightning-projects | 15 | 12771686 | import os
import random
import sys
import pygame
from pygame.locals import *
class Satyr(pygame.sprite.Sprite):
    """Mouse-following sprite loaded from an image file and scaled down."""

    def __init__(self, position, ipath, scale=0.5):
        pygame.sprite.Sprite.__init__(self)
        sprite_img = pygame.image.load(ipath)
        width, height = sprite_img.get_rect().size
        scaled_size = (int(width * scale), int(height * scale))
        sprite_img = pygame.transform.scale(sprite_img, scaled_size)
        self.image = sprite_img
        self.rect = sprite_img.get_rect(center=position)
        self.rect.center = position

    def draw(self, surface, position):
        """Blit the sprite at *position* and keep its rect in sync."""
        self.rect.center = position
        surface.blit(self.image, position)
class Image(pygame.sprite.Sprite):
    """A sprite that bounces around inside the window bounds."""

    def __init__(self, position, ipath, scale=1.0):
        pygame.sprite.Sprite.__init__(self)
        img = pygame.image.load(ipath)
        w, h = img.get_rect().size[0], img.get_rect().size[1]
        w, h = int(w * scale), int(h * scale)
        img = pygame.transform.scale(img, (w, h))
        rect = img.get_rect(center=position)
        self.image = img
        self.rect = rect
        self.rect.center = position
        # Random initial direction on each axis (+1 or -1).
        self.dx = 1 if random.random() < 0.5 else -1
        # BUGFIX: was `1 if ... else 1`, so dy could never start negative.
        self.dy = 1 if random.random() < 0.5 else -1

    def draw(self, surface):
        surface.blit(self.image, self.rect.center)

    def update(self, width, height):
        """Advance one pixel per axis, reflecting off the window edges."""
        x, y = self.rect.center
        if x + self.image.get_rect().size[0] >= width:
            self.dx = -1
        elif x <= 0:
            self.dx = 1
        if y + self.image.get_rect().size[1] >= height:
            self.dy = -1
        elif y <= 0:
            self.dy = 1
        x, y = x + 1 * self.dx, y + 1 * self.dy
        self.rect.center = x, y
def start():
    """Run the demo loop: a mouse-driven Satyr and one bouncing ball,
    logging a message whenever their rects collide."""
    pygame.init()
    FPS = 30
    width = 400
    height = 400
    DISPLAYSURF = pygame.display.set_mode((width, height))
    DISPLAYSURF.fill((255, 255, 255))
    pygame.display.set_caption('Key Events')
    fps_clock = pygame.time.Clock()
    satyr = Satyr((200, 200), './images/Satyr_01_Idle_000.png', scale=0.25)
    ball = Image((random.randint(0, width), random.randint(0, height)), './images/ball.png', scale=0.25)
    ball_group = pygame.sprite.Group()
    ball_group.add(ball)
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        DISPLAYSURF.fill((255, 255, 255, 0))
        # Rect-based collision test between cursor sprite and the ball group.
        if pygame.sprite.spritecollide(satyr, ball_group, False):
            print(f'collided {ball.rect.center}')
        satyr.draw(DISPLAYSURF, pygame.mouse.get_pos())
        ball.draw(DISPLAYSURF)
        ball.update(width, height)
        pygame.display.update()
        fps_clock.tick(FPS)
fps_clock.tick(FPS)
if __name__ == '__main__':
    # Ask SDL to center the window on screen before pygame creates it.
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    start()
| 2.828125 | 3 |
tests/test_processor.py | alisaifee/jira-cli | 125 | 12771687 | <gh_stars>100-1000
import unittest
import mock
from jiracli.interface import build_parser, cli
class AddCommandTests(unittest.TestCase):
    """Regression tests for the `jira new` (add issue) CLI command."""
    def test_issue_type_parsing(self):
        "Previously, calling this would raise an exception on python3"
        with mock.patch("jiracli.interface.print_output"):
            with mock.patch("jiracli.interface.prompt") as prompt:
                with mock.patch("jiracli.interface.initialize") as init:
                    # Stub the issue-type lookup so no live JIRA server is needed.
                    init().get_issue_types.return_value = {'story': 1}
                    cli("new title --type story --project FOO --description bar".split(" "))
| 2.484375 | 2 |
Metodos Computacionales Uniandes/Code/ejercicio_28.py | aess14/Cursos-Uniandes | 0 | 12771688 | <filename>Metodos Computacionales Uniandes/Code/ejercicio_28.py
#Escriba una función en python que toma como entrada dos listas
#(valores_x, valores_y) y el grado de un polinomio (p_poli, un entero
# positivo) y ajusta
#por mínimos cuadrados el polinomio de orden p_poli que mejor ajusta
#los datos en valores_x y valores_y. La función debe implementar el
#ajuste en términos de operaciones de álgebra lineal trabajados en el
#"Ejercicio 28 - primera parte".
#La función de debe llamar ajuste_matricial.
#La función debe tomar como entrada, en ese orden, las variables
#valores_x, valores_y y p_poli.
#La función debe hacer el ajuste y hacer una gráfica donde se muestran
#los valores de entrada (valores_x, valores_y) y el polinomio de
#ajuste. La gráfica producida se debe guardar como "ajuste.png"
#(siguiendo el estilo de la Figura 1.4 del texto guía).
#La solución debe estar en un archivo llamado
#"ApellidoNombre_Ejercicio28.py" donde Apellido y Nombre debe
#reemplazarlos con su apellido y nombre. Suba ese archivo como
#respuesta a esta actividad.
#Al ejecutar "python ApellidoNombre_Ejercicio28.py" no se debe producir
#ningún error. Al llamar la función con el nombre solicitado no se debe
#producir ningún error.
import numpy as np
import matplotlib.pyplot as plt
def ajuste_matricial(valores_x, valores_y, p_poli):
    """Least-squares fit of a degree-``p_poli`` polynomial via linear algebra.

    Builds the Vandermonde design matrix S with S[i, j] = x_i ** j, solves
    for the coefficients with the pseudo-inverse, and saves a plot of the
    data points plus the fitted curve to ``ajuste.png``.

    Parameters
    ----------
    valores_x, valores_y : sequences of float
        Sample coordinates to fit.
    p_poli : int
        Degree of the fitting polynomial (positive integer).
    """
    x_data = np.asarray(valores_x, dtype=float)
    y_data = np.asarray(valores_y, dtype=float)
    # Vectorized Vandermonde matrix replaces the former double loop.
    S = np.vander(x_data, p_poli + 1, increasing=True)
    C = np.linalg.pinv(S) @ y_data
    # Evaluate the fitted polynomial on a dense grid for plotting.
    x = np.linspace(x_data.min(), x_data.max(), 100)
    y = np.vander(x, p_poli + 1, increasing=True) @ C
    plt.figure()
    plt.scatter(valores_x, valores_y)
    plt.plot(x, y)
    plt.savefig("ajuste.png")
#ajuste_matricial([1,2,4,6],[4,8,7,1],3)
#ajuste_matricial([-2.0, -2.0, 0.0, 3.0, 4.0], [5.0, 1.0, -3.0, -2.0, -5.5],1)
| 3.546875 | 4 |
toontown/uberdog/GlobalLobbyManagerAI.py | CrankySupertoon01/Toontown-2 | 1 | 12771689 | from direct.distributed.DistributedObjectGlobalAI import DistributedObjectGlobalAI
from direct.distributed.PyDatagram import *
from direct.directnotify.DirectNotifyGlobal import directNotify
class GlobalLobbyManagerAI(DistributedObjectGlobalAI):
    """AI-side proxy for the global lobby manager.

    The d_* methods forward their arguments over the wire via sendUpdate
    (Panda3D distributed-object convention); the matching plain methods are
    local receive stubs. Field names and argument lists must match the
    distributed-class definition exactly — do not reorder them.
    """
    notify = directNotify.newCategory('GlobalLobbyManagerAI')
    def announceGenerate(self):
        DistributedObjectGlobalAI.announceGenerate(self)
        # Introduce this shard's lobby manager to the global service.
        self.sendUpdate('lobbyManagerAIHello', [simbase.air.lobbyManager.doId])
    def sendAddLobby(self, avId, lobbyId):
        self.sendUpdate('addLobby', [avId, lobbyId])
    def queryLobbyForHost(self, hostId):
        self.sendUpdate('queryLobby', [hostId])
    def d_lobbyStarted(self, lobbyId, shardId, zoneId, hostName):
        self.sendUpdate('lobbyHasStarted', [lobbyId, shardId, zoneId, hostName])
    def lobbyStarted(self, lobbyId, shardId, zoneId, hostName):
        pass
    def d_lobbyDone(self, lobbyId):
        self.sendUpdate('lobbyDone', [lobbyId])
    def lobbyDone(self, lobbyId):
        pass
    def d_toonJoinedLobby(self, lobbyId, avId):
        self.sendUpdate('toonJoinedLobby', [lobbyId, avId])
    def toonJoinedLobby(self, lobbyId, avId):
        pass
    def d_toonLeftLobby(self, lobbyId, avId):
        self.sendUpdate('toonLeftLobby', [lobbyId, avId])
    def toonLeftLobby(self, lobbyId, avId):
        pass
    def d_requestLobbySlot(self, lobbyId, avId):
        self.sendUpdate('requestLobbySlot', [lobbyId, avId])
    def requestLobbySlot(self, lobbyId, avId):
        pass
    def d_allocIds(self, numIds):
        self.sendUpdate('allocIds', [numIds])
    def allocIds(self, numIds):
        pass
| 1.890625 | 2 |
src/ensemble_voting.py | andifunke/semeval18task12 | 2 | 12771690 | """
Combines predictions based on votes by a set of answer files.
"""
import re
from os import listdir
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from sklearn.metrics import accuracy_score
from .constants import LABEL
from .preprocessing import get_train_dev_test
def vote(y_true, y_pred, conf):
    """Score two ensembling strategies over a stack of model predictions.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground-truth binary labels.
    y_pred : ndarray of shape (n_models, n_samples)
        Per-model binary predictions.
    conf : array-like of shape (n_models, n_samples)
        Per-model confidence for each prediction.

    Returns
    -------
    (acc_conf, acc_major) : accuracy of the confidence vote and of the
    majority vote (ties broken by the confidence vote).
    """
    y_true = np.asarray(y_true)
    """ confidence vote """
    # Per sample, take the prediction of the most confident model.
    best_model = np.argmax(conf, axis=0)
    conf_vote = y_pred.T[np.arange(len(y_pred.T)), best_model]
    # Plain numpy accuracy replaces the sklearn dependency.
    acc_conf = np.mean(conf_vote == y_true)
    """ majority vote """
    mean_pred = np.mean(y_pred, axis=0)
    # In case of an exact tie, fall back to the confidence vote.
    tie = np.isclose(mean_pred, 0.5)
    mean_pred[tie] = conf_vote[tie]
    majority = mean_pred >= 0.5
    acc_major = np.mean(majority == y_true)
    return acc_conf, acc_major
def plot_axis(df, ax, legend_pos='orig1'):
    """Draw one accuracy-vs-number-of-models panel onto *ax*.

    *legend_pos* selects one of four hand-tuned legend placements;
    any other value falls back to matplotlib's default legend.
    """
    df.plot(x=np.arange(1, len(df) + 1), ax=ax, use_index=False, xlim=[-25, len(df) + 25], ylim=[0.5, 0.775],
            style=['-', '-', '-', '-'], lw=1.5,
            yticks=[.5, .525, .55, .575, .6, .625, .65, .675, .7, .725, .75, .775])
    # Thinner, lightened lines for the secondary (vote) series.
    ax.lines[1].set_linewidth(0.9)  # 1.15
    ax.lines[3].set_linewidth(0.9)  # 1.15
    col1 = ax.lines[0].get_color()
    col2 = ax.lines[2].get_color()
    ax.lines[1].set_color(tuple(1.3*c for c in col1))  # 1.1*
    ax.lines[3].set_color(tuple(1.3*c for c in col2))  # 1.1*
    ax.grid(b=True, which='major', linestyle='-', linewidth=0.85)
    ax.grid(b=True, which='minor', linestyle=':', linewidth=0.75)
    if legend_pos == 'orig1':
        ax.legend(loc='center', bbox_to_anchor=(0.5, 0.365))
    elif legend_pos == 'orig2':
        ax.legend().remove()
    elif legend_pos == 'alt1':
        ax.legend(loc='center', bbox_to_anchor=(0.5, 0.14))
    elif legend_pos == 'alt2':
        ax.legend(loc='lower left', bbox_to_anchor=(0.02, 0))
    else:
        ax.legend()
    ax.set_xlabel('number of models', weight='bold')
    ax.set_ylabel('accuracy', weight='bold')
    # majorLocator_x = MultipleLocator(500)
    majorLocator_y = MultipleLocator(.05)
    majorFormatter_y = FormatStrFormatter('%.2f')
    minorLocator_y = MultipleLocator(.025)
    # ax.xaxis.set_major_locator(majorLocator_x)
    ax.yaxis.set_major_locator(majorLocator_y)
    ax.yaxis.set_major_formatter(majorFormatter_y)
    ax.yaxis.set_minor_locator(minorLocator_y)
def plot_figure(dfs: list, name, show=True, save=False, legend_pos: list=None, align='h'):
    """Render one plot_axis panel per DataFrame in *dfs*.

    Panels are laid out horizontally ('h') or vertically; the figure is
    optionally shown and/or saved as ``<name>.pdf``.
    """
    length = len(dfs)
    if legend_pos is None:
        legend_pos = [''] * length
    sns.set(color_codes=True, font_scale=1)
    sns.set_style("whitegrid", {'legend.frameon': True})
    sns.set_palette("deep")
    if align == 'h':
        fig, ax = plt.subplots(ncols=length, figsize=(5*length, 5), sharey=True)
    else:
        fig, ax = plt.subplots(nrows=length, figsize=(5, 5*length))
    if length > 1:
        for i, df in enumerate(dfs):
            plot_axis(df, ax[i], legend_pos=legend_pos[i])
        # Titles assume the two-panel original-vs-alternative comparison.
        ax[0].set_title('original dataset')
        ax[1].set_title('alternative (randomized) data split')
    else:
        # A single subplot returns a bare Axes, not an array.
        plot_axis(dfs[0], ax, legend_pos=legend_pos[0])
    fig.tight_layout()
    if show:
        plt.show()
    if save:
        fig.savefig(name + '.pdf', bbox_inches='tight')
    plt.close('all')
def build_df(files, y_true):
    """Load per-model probability dumps and assemble an evaluation frame.

    Each file in *files* is a .npy array of predicted probabilities for the
    same samples; *y_true* holds the binary ground-truth labels. Returns a
    DataFrame with one row per model and top-level columns
    ['acc', 'pred', 'prob', 'conf'].
    """
    y_true = np.asarray(y_true)
    probs_ser_lst = [pd.Series(np.load(f).flatten(), name=f[-48:-4].replace(' ', '0')) for f in files]
    probs_df = pd.DataFrame(probs_ser_lst)
    # Vectorized comparison replaces the deprecated applymap.
    preds_df = probs_df >= 0.5
    # Confidence = distance of the probability from the decision boundary.
    confs_df = (probs_df - 0.5).abs()
    # Plain numpy accuracy replaces the sklearn dependency.
    accs_ser = preds_df.apply(lambda row: float(np.mean(row.values == y_true)), axis=1)
    df = pd.concat([accs_ser, preds_df, probs_df, confs_df], axis=1,
                   keys=['acc', 'pred', 'prob', 'conf'])
    return df
def main():
    """End-to-end ensemble evaluation for each configured run directory:
    load probability dumps, score single models, grow confidence/majority
    votes one model at a time, and plot the resulting accuracy curves."""
    names = {
        # 'tensorL05con2redo2': '/media/andreas/Linux_Data/hpc-semeval/tensorL05con2redo2/out/',
        'alt_split_odd': '/media/andreas/Linux_Data/hpc-semeval/alt_split_odd_both/',
    }
    _, df_dev_data, df_tst_data = get_train_dev_test(options=dict(alt_split=True))
    dev_true = df_dev_data[LABEL].values.flatten()
    tst_true = df_tst_data[LABEL].values.flatten()
    for k, d in names.items():
        directory = listdir(d)
        dev_files = [d + f for f in directory if re.match(r'^probabilities-' + 'dev', f)]
        tst_files = [d + f for f in directory if re.match(r'^probabilities-' + 'tst', f)]
        df_dev = build_df(dev_files, dev_true)
        df_tst = build_df(tst_files, tst_true)
        df = pd.concat([df_dev, df_tst], axis=1, keys=['dev', 'tst'])
        # Rank models by dev accuracy so votes grow best-first.
        df = df.sort_values(('dev', 'acc', 0), ascending=False)
        dev_acc_filter = 0.
        if dev_acc_filter:
            row_filter = df['dev', 'acc', 0] >= dev_acc_filter
            df = df[row_filter.values]
            print('filtered for dev accuracies >=', dev_acc_filter)
        dev_mean = np.mean(df['dev', 'acc', 0].values)
        tst_mean = np.mean(df['tst', 'acc', 0].values)
        dev_preds_np = df['dev', 'pred'].values
        dev_confs_np = df['dev', 'conf'].values
        tst_preds_np = df['tst', 'pred'].values
        tst_confs_np = df['tst', 'conf'].values
        # print more stats
        if False:
            pd.set_option('display.float_format', lambda x: '%.6f' % x)
            print('dev:\n', pd.Series(dev_mean).describe())
            print('test:\n', pd.Series(tst_mean).describe())
        dev_conf_scores = list()
        tst_conf_scores = list()
        dev_major_scores = list()
        tst_major_scores = list()
        # Ensemble the top-i models for every prefix size i.
        for i in range(1, len(df)+1):
            acc_conf_dev, acc_major_dev = vote(dev_true, y_pred=dev_preds_np[:i], conf=dev_confs_np[:i])
            acc_conf_tst, acc_major_tst = vote(tst_true, y_pred=tst_preds_np[:i], conf=tst_confs_np[:i])
            dev_conf_scores.append(acc_conf_dev)
            tst_conf_scores.append(acc_conf_tst)
            dev_major_scores.append(acc_major_dev)
            tst_major_scores.append(acc_major_tst)
        mtrx = {
            # 'dev: confidence vote': dev_conf_scores,
            # 'test: confidence vote': tst_conf_scores,
            'test: mean accuracy': tst_mean,
            'dev: sorted accuracy': df['dev', 'acc', 0],
            'dev: majority vote': dev_major_scores,
            'test: majority vote': tst_major_scores,
            # 'dev: mean accuracy': dev_mean,
        }
        df = pd.DataFrame(mtrx)
        plot_figure([df], k + 'all_')
        # df.to_csv('../out/alt-split_2560.csv', sep='\t')
if __name__ == '__main__':
    # TODO: clean up code or add argument flags
    # Currently replots precomputed CSV results instead of rerunning main().
    # main()
    # df1 = pd.read_csv('../out/orig-split.csv', sep='\t')
    df2 = pd.read_csv('../out/alt-split_2560.csv', sep='\t')
    # plot_figure([df1], '../out/orig-split_2', save=True, legend_pos=['orig1'])
    plot_figure([df2], '../out/alt-split_2560', save=True, legend_pos=['alt1'])
    # plot_figure([df1, df2], '../out/ensemble_h_2', save=True, legend_pos=['orig2', 'alt2'], align='h')
    # plot_figure([df1, df2], '../out/ensemble_v_2', save=True, legend_pos=['orig2', 'alt2'], align='v')
| 2.734375 | 3 |
tests/test_color.py | underwatergrasshopper/PyUnderGUI | 0 | 12771691 | <filename>tests/test_color.py
import math
from TestKit import *
from UnderGUI.Color import *
__all__ = ['test_color']
def test_color():
    """Round-trip conversion checks between ColorF, ColorI and ColorB.

    Float channels use a 0.01 tolerance because 8-bit quantisation loses
    precision on the way through ColorI/ColorB.
    """
    ### ColorF ###
    c = ColorF(1, 0.5, 0.75, 0.25).to_color_i()
    assert c.r == 255 and c.g == 127 and c.b == 191 and c.a == 63
    c = ColorF(1, 0.5, 0.75, 0.25).to_color_f()
    assert math.isclose(c.r, 1.0, abs_tol=0.01) and math.isclose(c.g, 0.5, abs_tol=0.01) and math.isclose(c.b, 0.75, abs_tol=0.01) and math.isclose(c.a, 0.25, abs_tol=0.01)
    c = ColorF(1, 0.5, 0.75, 0.25).to_color_b()
    assert c.r == b'\xFF' and c.g == b'\x7F' and c.b == b'\xBF' and c.a ==b'\x3F'
    ### ColorI ###
    c = ColorI(255, 127, 191, 63).to_color_i()
    assert c.r == 255 and c.g == 127 and c.b == 191 and c.a == 63
    c = ColorI(255, 127, 191, 63).to_color_f()
    assert math.isclose(c.r, 1.0, abs_tol=0.01) and math.isclose(c.g, 0.5, abs_tol=0.01) and math.isclose(c.b, 0.75, abs_tol=0.01) and math.isclose(c.a, 0.25, abs_tol=0.01)
    c = ColorI(255, 127, 191, 63).to_color_b()
    assert c.r == b'\xFF' and c.g == b'\x7F' and c.b == b'\xBF' and c.a ==b'\x3F'
    ### ColorB ###
    c = ColorB(b'\xFF', b'\x7F', b'\xBF', b'\x3F').to_color_i()
    assert c.r == 255 and c.g == 127 and c.b == 191 and c.a == 63
    c = ColorB(b'\xFF', b'\x7F', b'\xBF', b'\x3F').to_color_f()
    assert math.isclose(c.r, 1.0, abs_tol=0.01) and math.isclose(c.g, 0.5, abs_tol=0.01) and math.isclose(c.b, 0.75, abs_tol=0.01) and math.isclose(c.a, 0.25, abs_tol=0.01)
    c = ColorB(b'\xFF', b'\x7F', b'\xBF', b'\x3F').to_color_b()
    assert c.r == b'\xFF' and c.g == b'\x7F' and c.b == b'\xBF' and c.a ==b'\x3F'
    # ColorB also accepts packed byte strings (RGB + alpha, or RGBA).
    c = ColorB(b'\xFF\x7F\xBF', a = b'\x3F')
    assert c.r == b'\xFF' and c.g == b'\x7F' and c.b == b'\xBF' and c.a ==b'\x3F'
    c = ColorB(b'\xFF\x7F\xBF\x3F')
    assert c.r == b'\xFF' and c.g == b'\x7F' and c.b == b'\xBF' and c.a ==b'\x3F'
    # immediate tests
    #print(int.from_bytes(b'\xff', "little"))
    #print((255).to_bytes(1, byteorder='little'))
if __name__ == "__main__":
run_test(test_color)
| 2.796875 | 3 |
roses/effect_size/test/vargha_delaney_test.py | jacksonpradolima/roses | 7 | 12771692 | <reponame>jacksonpradolima/roses
import unittest
import pandas as pd
from roses.effect_size.vargha_delaney import VD_A, VD_A_DF, reduce
class RunningEffectSize(unittest.TestCase):
    """Tests for the Vargha-Delaney A effect-size measure across all four
    magnitude categories, plus the DataFrame wrapper."""
    def test_VD_A_negligible(self):
        # negligible
        array1 = [0.8236111111111111, 0.7966666666666666, 0.923611111111111, 0.8197222222222222, 0.7108333333333333]
        array2 = [0.8052777777777779, 0.8172222222222221, 0.8322222222222223, 0.783611111111111, 0.8141666666666666]
        result = VD_A(array1, array2)
        self.assertTrue(result[0] == 0.56 and result[1] == 'negligible')
    def test_VD_A_small(self):
        # small
        array1 = [0.478515625, 0.4638671875, 0.4638671875, 0.4697265625, 0.4638671875, 0.474609375, 0.4814453125,
                  0.4814453125,
                  0.4697265625, 0.4814453125, 0.474609375, 0.4833984375, 0.484375, 0.44921875, 0.474609375, 0.484375,
                  0.4814453125, 0.4638671875, 0.484375, 0.478515625, 0.478515625, 0.45703125, 0.484375, 0.419921875,
                  0.4833984375, 0.478515625, 0.4697265625, 0.484375, 0.478515625, 0.4638671875]
        array2 = [0.4814453125, 0.478515625, 0.44921875, 0.4814453125, 0.4638671875, 0.478515625, 0.474609375, 0.4638671875,
                  0.474609375, 0.44921875, 0.474609375, 0.478515625, 0.478515625, 0.474609375, 0.4697265625, 0.474609375,
                  0.45703125, 0.4697265625, 0.478515625, 0.4697265625, 0.4697265625, 0.484375, 0.45703125, 0.474609375,
                  0.474609375, 0.4638671875, 0.45703125, 0.474609375, 0.4638671875, 0.4306640625]
        result = VD_A(array1, array2)
        self.assertTrue(result[0] == 0.6405555555555555 and result[1] == 'small')
    def test_VD_A_medium(self):
        # medium
        array1 = [0.9108333333333334, 0.8755555555555556, 0.900277777777778, 0.9274999999999999, 0.8777777777777779]
        array2 = [0.8663888888888888, 0.8802777777777777, 0.7816666666666667, 0.8377777777777776, 0.9305555555555556]
        result = VD_A(array1, array2)
        self.assertTrue(result[0] == 0.72 and result[1] == 'medium')
    def test_VD_A_large(self):
        # Large
        array1 = [0.9108333333333334, 0.8755555555555556, 0.900277777777778, 0.9274999999999999, 0.8777777777777779]
        array2 = [0.7202777777777778, 0.77, 0.8544444444444445, 0.7947222222222222, 0.7577777777777778]
        result = VD_A(array1, array2)
        self.assertTrue(result[0] == 1.0 and result[1] == 'large')
    def test_VD_A_DF(self):
        # Reads a fixture CSV relative to the test working directory.
        # df = pd.read_csv('../../../resources/kruskal.csv', sep=";")
        df = pd.read_csv('./resources/kruskal.csv', sep=";")
        reduced = reduce(VD_A_DF(df, 'fitness', 'algorithm'), 'AlgorithmA')
        first_row = reduced.iloc[0]
        self.assertTrue(first_row['base'] == 'AlgorithmA'
                        and first_row['compared_with'] == 'AlgorithmB'
                        and first_row['estimate'] == '1.0'
                        and first_row['magnitude'] == 'large'
                        and first_row['effect_size_symbol'] == '$\\blacktriangle$')
if __name__ == '__main__':
    # BUGFIX: stray dataset-artifact tokens ("| 2.734375 | 3 |") fused onto
    # this line made the module a SyntaxError; they have been removed.
    unittest.main()
DeepFashionModel/classDetect/predict_and_pickle.py | RexBarker/DeepF | 0 | 12771693 | <reponame>RexBarker/DeepF
# given an input model, run batch prediction and pickle results
import os
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import numpy as np
import argparse
import pickle
# Parse command-line arguments: model path, test-image directory, batch count/size.
parser = argparse.ArgumentParser()
parser.add_argument('-model',type=str,dest='modelfile',required=True,
                    help='Required: model file')
parser.add_argument('-testdir',type=str,dest='testdir',required=True,
                    help='Required: test directory')
parser.add_argument('-nsteps',type=int,dest='nsteps',default = 2000,
                    help='Optional: number of steps (def=2000)')
parser.add_argument('-nbatch',type=int,dest='nbatch',default = 32,
                    help='Optional: number of images per batch (def=32)')
args = parser.parse_args()
assert os.path.exists(args.modelfile), "Could not find model file!"
# Results are pickled next to the cwd, named after the model file's stem.
resultFile = os.path.basename(args.modelfile).split('.')[0] + '.pickle'
model = load_model(args.modelfile)
# Deterministic (non-shuffled, fixed seed) iterator over the test set,
# rescaled to [0, 1] and resized to the model's 200x200 input.
test_datagen = ImageDataGenerator(rescale=1./255.)
test_iterator = test_datagen.flow_from_directory(directory=args.testdir,
                                    shuffle=False,
                                    batch_size=args.nbatch,
                                    seed= 1234,
                                    class_mode='categorical',
                                    target_size=(200, 200))
# collect batch predictions
y_test_col = None
y_pred_col = None
for _ in range(args.nsteps):
    x_test,y_test = next(test_iterator)
    y_pred = model.predict_on_batch(x_test)
    if y_test_col is None:
        y_test_col = y_test
        y_pred_col = y_pred
    else:
        y_test_col = np.append(y_test_col,y_test,axis=0)
        y_pred_col = np.append(y_pred_col,y_pred,axis=0)
# dump to a pickle file, for use with ROC curve producer
# BUGFIX: stray dataset-artifact tokens ("| 2.765625 | 3 |") fused onto the
# dump line made the script a SyntaxError; they have been removed.
with open(resultFile, 'wb') as fp:
    pickle.dump((y_test_col, y_pred_col), fp)
src/ProjectScript.py | JacobSal/openSIM | 0 | 12771694 | <reponame>JacobSal/openSIM
#%% Imports
import opensim as osim
from opensim import Vec3
from os.path import join, abspath, dirname
from os import mkdir
import numpy as np
import matplotlib.pyplot as plt
#%% Path
# NOTE(review): 'global' at module scope is a no-op, and omArm26Dir is never
# assigned in this view — confirm it is defined elsewhere or drop it.
global cfpath, omArm26Dir
cfpath = dirname(__file__)  # directory containing this script
prjPath = join(cfpath,'_data','final_project_saves')
modSavePaths = join(prjPath,'DynamicWalkerModel.osim')  # output model path
#%% Definitions
def funExp(d, ddot):
    """Inverse-square force law: F(x) = c / x**2 + ddot for each x in *d*.

    Parameters
    ----------
    d : iterable of float
        Distances in metres (each must be non-zero).
    ddot : float
        Constant offset added to every force value.

    Returns
    -------
    list of float
        Force values in N, one per input distance.
    """
    c = 0.01  # N*m^2
    # Comprehension replaces the manual append loop.
    return [c / (x * x) + ddot for x in d]
#%% PARAMS
global pelvisWidth, thighLength, shankLength
pelvisWidth = 0.20
thighLength = 0.40
shankLength = 0.435
#%% PARAMS 2 CHANGE
# realistic mass ratios: https://www.d.umn.edu/~mlevy/CLASSES/ESAT3300/LABS/LAB8_COM/bsp.htm
# Thigh mass percent (M/F): 10.50/11.75
# Shank mass percent (M/F): 4.75/5.35
# Thigh mass center (M/F): 43.3/42.8
# Shank mass center (M/F): 43.4/41.9
# foot mass & mass center (M/F): 1.43/1.33 & 50/50
# bn
rThighI = osim.Inertia(2,2,0.02,0,0,0)
lThighI = osim.Inertia(2,2,0.02,0,0,0)
rShankI = osim.Inertia(1,1,1,0,0,0)
lShankI = osim.Inertia(1,1,1,0,0,0)
rTm = 1
lTm = 1
rSm = 1
lSm = 1
# HUNTCROSLEY forces (6 contact spheres)
stiffness = [1000000,1000000,1000000,1000000,1000000,1000000]
stiffness = [1000000,1000000,1000000,1000000,1000000,1000000]
dissipation = [2.0,2.0,2.0,2.0,2.0,2.0]
staticFriction = [0.8,0.8,0.8,0.8,0.8,0.8]
dynamicFriction = [0.4,0.4,0.4,0.4,0.4,0.4]
viscousFriction = [0.4,0.4,0.4,0.4,0.4,0.4]
transitionVelocity = [0.2,0.2,0.2,0.2,0.2,0.2]
# TORQUE LIMIT (4 limits)
upperStiffness = [0.5,0.5,0.5,0.5]
lowerStiffness = [0.5,0.5,0.5,0.5]
kneeUpperLimit = [0,0,0,0]
kneeLowerLimit = [-140,-140,-140,-140]
hipUpperLimit = [100,100,100,100]
hipLowerLimit = [-100,-100,-100,-100]
damping = [0.025,0.025,0.025,0.025]
transition = [5,5,5,5]
#%% MAIN
if __name__ == '__main__':
    # Build the walker model from scratch
    myModel = osim.Model()
    myModel.setName('DynamicWalkerModel')
    ground = myModel.get_ground()
    myModel.set_gravity(Vec3(0, -9.80665, 0))
    # Section: Create the Platform body (a long brick the walker slides down)
    platform = osim.Body()
    platform.setName('Platform')
    platform.setMass(1)
    platform.setInertia(osim.Inertia(1,1,1,0,0,0))
    platformGeometry = osim.Brick(Vec3(10,0.05,1))
    platformGeometry.setColor(Vec3(0.8,0.1,0.1))
    platform.attachGeometry(platformGeometry)
    # add body to model
    myModel.addBody(platform)
    # Section: Create the Platform Joint
    # Make and add a Pin joint for the Platform Body
    locationInParent = Vec3(0,0,0)
    orientationInParent = Vec3(0,0,0)
    locationInChild = Vec3(0,0,0)
    orientationInChild = Vec3(0,0,0)
    platformToGround = osim.PinJoint('PlatformToGround', # Joint Name
                                     ground, # Parent Frame
                                     locationInParent, # Translation in Parent Frame
                                     orientationInParent, # Orientation in Parent Frame
                                     platform, # Child Frame
                                     locationInChild, # Translation in Child Frame
                                     orientationInChild) # Orientation in Child Frame
    # Add the PlatformToGround Joint to the Model
    myModel.addJoint(platformToGround)
    # Set the coordinate properties of the Pin Joint
    platform_rz = platformToGround.upd_coordinates(0)
    platform_rz.setRangeMax(np.deg2rad(100))
    # Bug fix: this call was previously setRangeMax again, leaving the minimum
    # of the range unset; -100 deg is the intended minimum.
    platform_rz.setRangeMin(np.deg2rad(-100))
    platform_rz.setName('platform_rz')
    platform_rz.setDefaultValue(np.deg2rad(-10))
    platform_rz.setDefaultSpeedValue(0)
    platform_rz.setDefaultLocked(True)
# Section: Create the Pelvis
# Make and add a Pelvis Body
pelvis = osim.Body()
pelvis.setName('Pelvis')
pelvis.setMass(1)
pelvis.setInertia(osim.Inertia(1,1,1,0,0,0))
# Add geometry for display
pelvis.attachGeometry(osim.Sphere(pelvisWidth))
# Add Body to the Model
myModel.addBody(pelvis)
# Make and add a Planar joint for the Pelvis Body
pelvisToPlatform = osim.PlanarJoint('PelvisToPlatform', platform, pelvis)
# Update the coordinates of the new joint
Pelvis_rz = pelvisToPlatform.updCoordinate(0) # Rotation about z
Pelvis_rz.setRangeMax(np.pi)
Pelvis_rz.setRangeMin(-np.pi)
Pelvis_rz.setName('Pelvis_rz')
Pelvis_rz.setDefaultValue(0)
Pelvis_tx = pelvisToPlatform.updCoordinate(1); # Translation about x
Pelvis_tx.setRangeMax(10)
Pelvis_tx.setRangeMin(-5)
Pelvis_tx.setName('Pelvis_tx')
Pelvis_tx.setDefaultValue(-2)
Pelvis_tx.setDefaultSpeedValue(0)
Pelvis_ty = pelvisToPlatform.updCoordinate(2); # Translation about y
Pelvis_ty.setRangeMax(5)
Pelvis_ty.setRangeMin(-5)
Pelvis_ty.setName('Pelvis_ty')
Pelvis_ty.setDefaultValue(thighLength + shankLength)
Pelvis_ty.setDefaultSpeedValue(0)
myModel.addJoint(pelvisToPlatform)
    #%% LOWER LIMB BODIES
    # Create the four segments (two thighs, two shanks) as ellipsoids with
    # mass centers at their geometric centers.
    lowerLimbBodies = []
    namesLLB = ['RightThigh','LeftThigh','RightShank','LeftShank']
    massLLB = [rTm,lTm,rSm,lSm]
    inertiaLLB = [rThighI,
                  lThighI,
                  rShankI,
                  lShankI]
    geomLLB = [osim.Ellipsoid(thighLength/10,thighLength/2,thighLength/10),
               osim.Ellipsoid(thighLength/10,thighLength/2,thighLength/10),
               osim.Ellipsoid(shankLength/10,shankLength/2,shankLength/10),
               osim.Ellipsoid(shankLength/10,shankLength/2,shankLength/10)]
    for i in range(0,len(namesLLB)):
        lowerLimbBodies.append(osim.Body())
        lowerLimbBodies[i].setMassCenter(Vec3(0,0,0))
        lowerLimbBodies[i].setName(namesLLB[i])
        lowerLimbBodies[i].setMass(massLLB[i])
        lowerLimbBodies[i].setInertia(inertiaLLB[i])
        lowerLimbBodies[i].attachGeometry(geomLLB[i])
        myModel.addBody(lowerLimbBodies[i])
    #endfor
    del namesLLB, massLLB, inertiaLLB, geomLLB
    #%% LOWER LIMB JOINTS
    # Pin joints: hips connect thighs to the pelvis (offset by +/- pelvisWidth
    # along z); knees connect shanks to the distal end of each thigh.
    lowerLimbJoints = []
    pervBod = [pelvis,pelvis,lowerLimbBodies[0],lowerLimbBodies[1]]
    childBod = [lowerLimbBodies[0],lowerLimbBodies[1],lowerLimbBodies[2],lowerLimbBodies[3]]
    namesLLJ = ['RightThighToPelvis','LeftThighToPelvis','RightShankToThigh','LeftShankToThigh']
    #%% JOINT COORDS
    # NOTE(review): rotations_rz is initialized as a list but is rebound to a
    # Coordinate in each loop iteration below; the list is never used.
    rotations_rz = []
    rangeCoords = [(np.deg2rad(100),np.deg2rad(-100)),
                   (np.deg2rad(100),np.deg2rad(-100)),
                   (np.deg2rad(0),np.deg2rad(-100)),
                   (np.deg2rad(0),np.deg2rad(-100))]
    nameCoords = ['RHip_rz','LHip_rz','RKnee_rz','LKnee_rz']
    # NOTE(review): txName/tyName/tx_off/ty_off are defined but never used --
    # presumably leftovers from a planned planar-joint variant; confirm.
    txName = ['RHip_tx','LHip_tx','RKnee_tx','LKnee_tx']
    tyName = ['RHip_ty','LHip_ty','RKnee_ty','LKnee_ty']
    defaultCoords = [np.deg2rad(30),
                     np.deg2rad(-10),
                     np.deg2rad(-30),
                     np.deg2rad(-30)]
    tx_off = [0,0,0,0]
    ty_off = [0,0,0,0]
    locNpar = [Vec3(0,0,pelvisWidth),Vec3(0,0,-pelvisWidth),Vec3(0,-thighLength/2,0),Vec3(0,-thighLength/2,0)]
    oriNpar = [Vec3(0,0,0),Vec3(0,0,0),Vec3(0,0,0),Vec3(0,0,0)]
    locNchi = [Vec3(0,thighLength/2,0),Vec3(0,thighLength/2,0),Vec3(0,shankLength/2,0),Vec3(0,shankLength/2,0)]
    oriNchi = [Vec3(0,0,0),Vec3(0,0,0),Vec3(0,0,0),Vec3(0,0,0)]
    for i in range(0,len(namesLLJ)):
        lowerLimbJoints.append(osim.PinJoint(namesLLJ[i],
                                             pervBod[i],
                                             locNpar[i],
                                             oriNpar[i],
                                             childBod[i],
                                             locNchi[i],
                                             oriNchi[i]))
        rotations_rz = lowerLimbJoints[i].updCoordinate(0) # Rotation about z
        rotations_rz.setRangeMax(rangeCoords[i][0])
        rotations_rz.setRangeMin(rangeCoords[i][1])
        rotations_rz.setName(nameCoords[i])
        rotations_rz.setDefaultValue(defaultCoords[i])
        myModel.addJoint(lowerLimbJoints[i])
    #endfor
    del childBod, pervBod, namesLLJ, rangeCoords, nameCoords, defaultCoords
    # Section: Construct ContactGeometry (half-space ground plane + 6 spheres)
    contactSphereRadius = 0.05
    # Make a Contact Half Space attached to the platform, slightly above its
    # surface; rotated -1.57 rad about z -- presumably so the half-space normal
    # points upward (OpenSim's default half-space faces -x) -- TODO confirm.
    groundContactLocation = Vec3(0,0.025,0)
    groundContactOrientation = Vec3(0,0,-1.57)
    groundContactSpace = osim.ContactHalfSpace(groundContactLocation,
                                               groundContactOrientation,
                                               platform)
    groundContactSpace.setName('PlatformContact')
    myModel.addContactGeometry(groundContactSpace)
    #%% SPHERE CONTACTS
    # Spheres at the hips (pelvis edges), tops of the shanks (knees), and
    # bottoms of the shanks (feet).
    contactSphere = []
    cntctRadii = [contactSphereRadius,
                  contactSphereRadius,
                  contactSphereRadius,
                  contactSphereRadius,
                  contactSphereRadius,
                  contactSphereRadius]
    cntctLocation = [Vec3(0,0,pelvisWidth),
                     Vec3(0,0,-pelvisWidth),
                     Vec3(0,shankLength/2,0),
                     Vec3(0,shankLength/2,0),
                     Vec3(0,-shankLength/2,0),
                     Vec3(0,-shankLength/2,0)]
    cntctFrame = [pelvis,
                  pelvis,
                  lowerLimbBodies[2],
                  lowerLimbBodies[3],
                  lowerLimbBodies[2],
                  lowerLimbBodies[3]]
    cntctName = ['RHipContact','LHipContact','RKneeContact','LKneeContact','RFootContact','LFootContact']
    for i in range(0,len(cntctName)):
        contactSphere.append(osim.ContactSphere())
        contactSphere[i].setRadius(cntctRadii[i])
        contactSphere[i].setLocation(cntctLocation[i])
        contactSphere[i].setFrame(cntctFrame[i])
        contactSphere[i].setName(cntctName[i])
        myModel.addContactGeometry(contactSphere[i])
    #endfor
#%% ADD HUNTCROSELEY FORCES
forcesHuntCrosley = []
forcesNames = ['RHipForce','LHipForce','RKneeForce','LKneeForce','RFootForce','LFootForce']
forcesGeom1 = ['RHipContact','LHipContact','RKneeContact','LKneeContact','RFootContact','LFootContact']
forcesGeom2 = ['PlatformContact','PlatformContact','RHipContact','LHipContact','RKneeContact','LKneeContact']
for i in range(0,len(forcesNames)):
forcesHuntCrosley.append(osim.HuntCrossleyForce())
forcesHuntCrosley[i].setName(forcesNames[i])
forcesHuntCrosley[i].addGeometry(forcesGeom1[i])
forcesHuntCrosley[i].addGeometry('PlatformContact')
forcesHuntCrosley[i].setStiffness(stiffness[i])
forcesHuntCrosley[i].setDissipation(dissipation[i])
forcesHuntCrosley[i].setStaticFriction(staticFriction[i])
forcesHuntCrosley[i].setDynamicFriction(dynamicFriction[i])
forcesHuntCrosley[i].setViscousFriction(viscousFriction[i])
forcesHuntCrosley[i].setTransitionVelocity(transitionVelocity[i])
myModel.addForce(forcesHuntCrosley[i])
#endfor
# TODO: Construct CoordinateLimitForces Here
# Define Coordinate Limit Force Parameters
limitTorque = []
torqueNames = ['RHipLimitTorque','LHipLimitTorque','RKneeLimitTorque','LKneeLimitTorque']
torqueCoords = ['RHip_rz','LHip_rz','RKnee_rz','LKnee_rz']
for i in range(0,len(torqueNames)):
limitTorque.append(osim.CoordinateLimitForce())
limitTorque[i].setName(torqueNames[i])
limitTorque[i].set_coordinate(torqueCoords[i])
limitTorque[i].setUpperStiffness(upperStiffness[i])
limitTorque[i].setLowerStiffness(lowerStiffness[i])
limitTorque[i].setUpperLimit(hipUpperLimit[i])
limitTorque[i].setLowerLimit(hipLowerLimit[i])
limitTorque[i].setDamping(damping[i])
limitTorque[i].setTransition(transition[i])
myModel.addForce(limitTorque[i])
#endfor
    # Define magnet forces about knee joints (contraction)
    # Inverse-square "magnet" pulling each thigh toward its shank; the plotted
    # funExp(d, 0) curve visualizes the same 0.01/d^2 expression used below.
    magnetForce = []
    magnetNames = ["RKnee_pointtopoint","LKnee_pointtopoint"]
    body1 = [str(lowerLimbBodies[0].getName()),str(lowerLimbBodies[1].getName())]
    point1 = [Vec3(0,-thighLength/2,0),Vec3(0,shankLength/2,0)]
    body2 = [str(lowerLimbBodies[2].getName()),str(lowerLimbBodies[3].getName())]
    point2 = [Vec3(0,-thighLength/2,0),Vec3(0,shankLength/2,0)]
    expression = ['0.01/(d*d)','0.01/(d*d)']
    d = np.arange(0.1,5,0.01)
    y = funExp(d,0)
    plt.figure("knee forces")
    plt.plot(d,y)
    plt.ylabel('Force output (N/m)')
    plt.xlabel('Distance from thigh to shank')
    plt.show()
    for i in range(0,len(expression)):
        print(f"Body 1: {body1[i]}")
        print(f"Body 2: {body2[i]}")
        magnetForce.append(osim.ExpressionBasedPointToPointForce())
        magnetForce[i].setName(magnetNames[i])
        magnetForce[i].setBody1Name(body1[i])
        magnetForce[i].setBody2Name(body2[i])
        magnetForce[i].setPoint1(point1[i])
        magnetForce[i].setPoint2(point2[i])
        magnetForce[i].setExpression(expression[i])
        myModel.addForce(magnetForce[i])
    #endfor
    # Define magnet forces about knee joints (extension)
    # Velocity-dependent damping term (-0.01*ddot) between thigh and shank
    # centers. Note: magnetForce and the parameter lists are intentionally
    # rebound here; the forces from the first loop are already in the model.
    magnetForce = []
    magnetNames = ["RKnee_dynP2P","LKnee_dynP2P"]
    body1 = [str(lowerLimbBodies[0].getName()),str(lowerLimbBodies[1].getName())]
    point1 = [Vec3(0,0,0),Vec3(0,0,0)]
    body2 = [str(lowerLimbBodies[2].getName()),str(lowerLimbBodies[3].getName())]
    point2 = [Vec3(0,0,0),Vec3(0,0,0)]
    expression = ['-0.01*ddot','-0.01*ddot']
    for i in range(0,len(expression)):
        print(f"Body 1: {body1[i]}")
        print(f"Body 2: {body2[i]}")
        magnetForce.append(osim.ExpressionBasedPointToPointForce())
        magnetForce[i].setName(magnetNames[i])
        magnetForce[i].setBody1Name(body1[i])
        magnetForce[i].setBody2Name(body2[i])
        magnetForce[i].setPoint1(point1[i])
        magnetForce[i].setPoint2(point2[i])
        magnetForce[i].setExpression(expression[i])
        myModel.addForce(magnetForce[i])
    #endfor
    # Initialize the System
    state = myModel.initSystem()
    # save the model to a file
    try: mkdir(prjPath)
    except FileExistsError: print(f'{prjPath} already exists')
    myModel.printToXML(modSavePaths)
    print(f'DynamicWalkerModel.osim printed!')
    # Report each body's name and collect its ground-frame position.
    # Body order follows creation order: Platform, Pelvis, RightThigh,
    # LeftThigh, RightShank, LeftShank.
    positions = []
    bodies = []
    for body in myModel.getBodyList():
        print(body.getName())
        # print(f"inertia: {body.get_inertia()}")
        # print(f"mass center: {body.get_mass_center()}")
        # print(f"mass: {body.get_mass()}")
        pp = body.getPositionInGround(state)
        # print(f"current position: {pp}")
        positions.append(pp.to_numpy())
        bodies.append(body)
        print("\n")
    #endfor
    # Sanity check: distance between each thigh origin and its shank origin
    # (right: indices 2 & 4, left: indices 3 & 5).
    print(f"{np.linalg.norm(positions[2]-positions[4])}")
    print(f"{np.linalg.norm(positions[3]-positions[5])}")
configs/common/cpu2006.py | KuroeKurose/gem5 | 0 | 12771695 | import os
import m5
from m5.objects import *
m5.util.addToPath('../common')
spec_dist = os.environ.get('M5_CPU2006', '/dist/m5/cpu2006')
binary_dir = spec_dist
data_dir = binary_dir
current_pid = 100
# 400.perlbench
def perlbench():
    """Build the SPEC CPU2006 400.perlbench reference-run process."""
    # Bug fix: previously read current_pid without declaring it global and
    # never incremented it, so subsequent benchmarks reused the same pid
    # (sibling bzip2 shows the intended pattern).
    global current_pid
    process = Process(pid=current_pid)
    current_pid = current_pid + 1
    process.cwd = binary_dir + '400.perlbench/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'perlbench_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable] + ['-I./lib', 'checkspam.pl', '2500', '5', '25', '11', '150', '1', '1', '1', '1']
    return process
#401.bzip2
def bzip2():
    """Build the SPEC CPU2006 401.bzip2 reference-run process.

    Also bumps the module-level ``current_pid`` counter so each process
    gets a distinct pid.
    """
    global current_pid
    process = Process(pid=current_pid)
    current_pid = current_pid + 1
    process.cwd = binary_dir + '401.bzip2/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'bzip2_base.amd64-m64-gcc42-nn'
    data = process.cwd+'input.program'
    process.cmd = [process.executable] + [data, '280']
    return process
#403.gcc
def gcc():
    """Build the SPEC CPU2006 403.gcc reference-run process (166.i -> 166.s)."""
    process = Process()
    process.cwd = binary_dir + '403.gcc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'gcc_base.amd64-m64-gcc42-nn'
    data = process.cwd +'166.i'
    output = process.cwd +'166.s'
    process.cmd = [process.executable] + [data]+['-o',output]
    return process
#410.bwaves
def bwaves():
    """Build the SPEC CPU2006 410.bwaves reference-run process."""
    process = Process()
    process.cwd = binary_dir + '410.bwaves/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'bwaves_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    return process
#416.gamess
def gamess():
    """Build the SPEC CPU2006 416.gamess reference-run process."""
    # Renamed the local from the misspelled ``prorcess`` to ``process`` for
    # consistency with every sibling helper.
    process = Process()
    process.cwd = binary_dir + '416.gamess/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'gamess_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    # gamess reads its configuration from stdin
    process.input = process.cwd + 'cytosine.2.config'
    return process
#429.mcf
def mcf():
    """Build the SPEC CPU2006 429.mcf reference-run process."""
    process = Process()
    process.cwd = binary_dir + '429.mcf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'mcf_base.amd64-m64-gcc42-nn'
    data = process.cwd+'inp.in'
    process.cmd = [process.executable] + [data]
    return process
#433.milc
def milc():
    """Build the SPEC CPU2006 433.milc reference-run process (input via stdin)."""
    process=Process()
    process.cwd = binary_dir + '433.milc/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'milc_base.amd64-m64-gcc42-nn'
    stdin=process.cwd +'su3imp.in'
    process.cmd = [process.executable]
    process.input=stdin
    return process
#434.zeusmp
def zeusmp():
    """Build the SPEC CPU2006 434.zeusmp reference-run process."""
    process=Process()
    process.cwd = binary_dir+'434.zeusmp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'zeusmp_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    return process
#435.gromacs
def gromacs():
    """Build the SPEC CPU2006 435.gromacs reference-run process."""
    process = Process()
    process.cwd = binary_dir+'435.gromacs/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'gromacs_base.amd64-m64-gcc42-nn'
    data = process.cwd +'gromacs.tpr'
    process.cmd = [process.executable] + ['-silent','-deffnm',data,'-nice','0']
    return process
#436.cactusADM
def cactusADM():
    """Build the SPEC CPU2006 436.cactusADM reference-run process."""
    process = Process()
    process.cwd = binary_dir+'436.cactusADM/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'cactusADM_base.amd64-m64-gcc42-nn'
    data = process.cwd+'benchADM.par'
    process.cmd = [process.executable] + [data]
    return process
# 437.leslie3d
def leslie3d():
    """Build the SPEC CPU2006 437.leslie3d reference-run process (input via stdin)."""
    process = Process()
    process.cwd = binary_dir + '437.leslie3d/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'leslie3d_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    process.input = process.cwd + 'leslie3d.in'
    return process
#444.namd
def namd():
    """Build the SPEC CPU2006 444.namd reference-run process."""
    process = Process()
    process.cwd = binary_dir + '444.namd/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'namd_base.amd64-m64-gcc42-nn'
    input= process.cwd +'namd.input'
    process.cmd = [process.executable] + ['--input',input,'--iterations','38','--output','namd.out']
    return process
#445.gobmk
def gobmk():
    """Build the SPEC CPU2006 445.gobmk reference-run process (GTP mode, stdin input)."""
    process=Process()
    process.cwd = binary_dir + '445.gobmk/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'gobmk_base.amd64-m64-gcc42-nn'
    stdin= process.cwd +'nngs.tst'
    process.cmd = [process.executable]+['--quiet','--mode','gtp']
    process.input=stdin
    return process
# 447.dealII TODO
#450.soplex
def soplex():
    """Build the SPEC CPU2006 450.soplex reference-run process."""
    process=Process()
    process.cwd = binary_dir + '450.soplex/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'soplex_base.amd64-m64-gcc42-nn'
    data= process.cwd +'ref.mps'
    process.cmd = [process.executable]+['-m3500',data]
    return process
#453.povray
def povray():
    """Build the SPEC CPU2006 453.povray reference-run process."""
    process=Process()
    process.cwd = binary_dir + '453.povray/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'povray_base.amd64-m64-gcc42-nn'
    data = process.cwd +'SPEC-benchmark-ref.ini'
    process.cmd = [process.executable]+[data]
    return process
#454.calculix
def calculix():
    """Build the SPEC CPU2006 454.calculix reference-run process."""
    process=Process()
    process.cwd = binary_dir + '454.calculix/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'calculix_base.amd64-m64-gcc42-nn'
    data = process.cwd +'hyperviscoplastic'
    process.cmd = [process.executable]+['-i',data]
    return process
#456.hmmer
def hmmer():
    """Build the SPEC CPU2006 456.hmmer reference-run process."""
    process=Process()
    process.cwd = binary_dir + '456.hmmer/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'hmmer_base.amd64-m64-gcc42-nn'
    data = process.cwd +'retro.hmm'
    process.cmd = [process.executable]+['--fixed', '0', '--mean', '500', '--num', '500000', '--sd', '350', '--seed', '0', data]
    return process
#458.sjeng
def sjeng():
    """Build the SPEC CPU2006 458.sjeng reference-run process."""
    process=Process()
    process.cwd = binary_dir + '458.sjeng/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'sjeng_base.amd64-m64-gcc42-nn'
    data= process.cwd +'ref.txt'
    process.cmd = [process.executable]+[data]
    return process
#459.GemsFDTD
def GemsFDTD():
    """Build the SPEC CPU2006 459.GemsFDTD reference-run process."""
    process=Process()
    process.cwd = binary_dir + '459.GemsFDTD/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'GemsFDTD_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]
    return process
#462.libquantum
def libquantum():
    """Build the SPEC CPU2006 462.libquantum reference-run process."""
    process=Process()
    process.cwd = binary_dir + '462.libquantum/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'libquantum_base.amd64-m64-gcc42-nn'
    # Bug fix: cmd was previously assigned the tuple ([executable], '1397', '8');
    # the benchmark arguments belong inside the command list.
    process.cmd = [process.executable] + ['1397', '8']
    return process
#464.h264ref
def h264ref():
    """Build the SPEC CPU2006 464.h264ref reference-run process."""
    process=Process()
    process.cwd = binary_dir + '464.h264ref/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'h264ref_base.amd64-m64-gcc42-nn'
    data = process.cwd + 'foreman_ref_encoder_baseline.cfg'
    process.cmd = [process.executable]+['-d',data]
    return process
#470.lbm
def lbm():
    """Build the SPEC CPU2006 470.lbm reference-run process."""
    process=Process()
    process.cwd = binary_dir + '470.lbm/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'lbm_base.amd64-m64-gcc42-nn'
    data= process.cwd +'100_100_130_ldc.of'
    process.cmd = [process.executable]+['3000', 'reference.dat', '0', '0' ,data]
    return process
#471.omnetpp
def omnetpp():
    """Build the SPEC CPU2006 471.omnetpp reference-run process."""
    process=Process()
    process.cwd = binary_dir + '471.omnetpp/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'omnetpp_base.amd64-m64-gcc42-nn'
    data=process.cwd +'omnetpp.ini'
    process.cmd = [process.executable]+[data]
    return process
#473.astar
def astar():
    """Build the SPEC CPU2006 473.astar reference-run process (cwd-relative config)."""
    process=Process()
    process.cwd = binary_dir + '473.astar/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'astar_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]+['BigLakes2048.cfg']
    return process
#481.wrf
def wrf():
    """Build the SPEC CPU2006 481.wrf reference-run process (cwd-relative config)."""
    process=Process()
    process.cwd = binary_dir + '481.wrf/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'wrf_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]+['namelist.input']
    return process
#482.sphinx3
def sphinx3():
    """Build the SPEC CPU2006 482.sphinx3 reference-run process."""
    process=Process()
    process.cwd = binary_dir + '482.sphinx3/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'sphinx_livepretend_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable]+['ctlfile', '.', 'args.an4']
    return process
#483.xalancbmk TODO
#998.specrand
def specrand_i():
    """Build the SPEC CPU2006 998.specrand (int) reference-run process."""
    process=Process()
    process.cwd = binary_dir + '998.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd + 'specrand_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable] + ['1255432124','234923']
    return process
#999.specrand
def specrand_f():
    """Build the SPEC CPU2006 999.specrand (fp) reference-run process."""
    process=Process()
    process.cwd = binary_dir + '999.specrand/run/run_base_ref_amd64-m64-gcc42-nn.0000/'
    process.executable = process.cwd +'specrand_base.amd64-m64-gcc42-nn'
    process.cmd = [process.executable] + ['1255432124','234923']
    return process
| 2.015625 | 2 |
Allfiles/Labs/01-preprocess-data/script/preprocess-rapids.py | madiepev/mslearn-deep-learning | 2 | 12771696 | # import libraries
import os
import argparse
import cudf
import mlflow
# define functions
def main():
    """Preprocess the raw airlines dataset on GPU and persist the result.

    Drives ``process_data`` with the column schema below, logs the processed
    row count to MLflow, and writes ``outputs/processed_data.csv``.
    """
    # Set the columns and their datatypes for conversion and parsing
    cols = ['Flight_Number_Reporting_Airline', 'Year', 'Quarter', 'Month', 'DayOfWeek', 'DOT_ID_Reporting_Airline', 'OriginCityMarketID', 'DestCityMarketID', 'DepTime', 'DepDelay', 'DepDel15', 'ArrTime', 'ArrDelay', 'ArrDel15', 'CRSDepTime', 'CRSArrTime', 'AirTime', 'Distance', 'Reporting_Airline', 'IATA_CODE_Reporting_Airline', 'Origin', 'OriginCityName', 'Dest', 'DestCityName', 'Cancelled']
    dtypes = {'Flight_Number_Reporting_Airline': 'float32', 'Year': 'float32', 'Quarter': 'float32', 'Month': 'float32', 'DayOfWeek': 'float32', 'DOT_ID_Reporting_Airline': 'float32', 'OriginCityMarketID': 'float32', 'DestCityMarketID': 'float32', 'DepTime': 'float32', 'DepDelay': 'float32', 'DepDel15': 'int', 'ArrTime': 'float32', 'ArrDelay': 'float32', 'ArrDel15': 'int', 'CRSDepTime': 'float32', 'CRSArrTime': 'float32', 'AirTime': 'float32', 'Distance': 'float32', 'Reporting_Airline': 'str', 'IATA_CODE_Reporting_Airline': 'str', 'Origin': 'str', 'OriginCityName': 'str', 'Dest': 'str', 'DestCityName': 'str', 'Cancelled': 'str'}
    categorical_columns = ['Flight_Number_Reporting_Airline', 'DepTime', 'ArrTime', 'CRSDepTime', 'CRSArrTime', 'Reporting_Airline', 'Origin', 'OriginCityName', 'Dest', 'DestCityName', 'Airline']
    # Process the full dataset and save it to file
    processed_data = process_data(cols, dtypes, categorical_columns)
    count_rows = len(processed_data)
    mlflow.log_metric("processed rows", count_rows)
    processed_data.to_csv('outputs/processed_data.csv', index=False)
# Define a function to process an entire dataset
def process_data(cols, dtypes, categorical_columns):
# Ingest - Read the CSV files into the DataFrame
data = cudf.read_csv('./data/airlines_raw.csv', cols=cols, dtypes=dtypes)[cols] # Read in data, ignoring any column not in cols
carriers = cudf.read_csv('./data/carriers.csv')
airports = cudf.read_csv('./data/airports.csv', usecols=['iata_code', 'latitude_deg', 'longitude_deg', 'elevation_ft'])
# Merge - Combine the external data with the airline data
data = cudf.merge(data, carriers, left_on='IATA_CODE_Reporting_Airline', right_on='Code', how='left')
data = cudf.merge(data, airports, left_on='Dest', right_on='iata_code', how='left')
data = cudf.merge(data, airports, left_on='Origin', right_on='iata_code', how='left')
# Rename - Add clarity to the combined dataset by renaming columns
data = data.rename(columns= { 'latitude_deg_x' : 'dest_lat', 'longitude_deg_x': 'dest_long',
'latitude_deg_y' : 'origin_lat', 'longitude_deg_y': 'origin_long',
'elevation_ft_x' : 'dest_elevation', 'elevation_ft_y' : 'origin_elevation',
'Description' : 'Airline'})
# Remove duplicates columns
data = data.drop(['iata_code_x', 'iata_code_y','IATA_CODE_Reporting_Airline', 'Code'], axis=1)
print(f'Added the following columns/features:\n { set(data.columns).difference(cols) }\n')
print(f'Data currently has {data.shape[0]} rows and {data.shape[1]} columns\n')
# Remove rows missing data
data = data.dropna()
print(f'Dropping rows with missing or NA values, data now has {data.shape[0]} rows and {data.shape[1]} columns\n')
# Encode - Convert human-readable names to corresponding computer-readable integers
encodings, mappings = data['OriginCityName'].factorize() # encode/categorize a sample feature
print("Example encoding:")
numeric_columns = []
for colname in data.columns:
if colname in categorical_columns:
values = data[colname].astype('category').cat.codes.astype('float32')
colname = 'enc_' + colname
data.insert(0, colname, values)
numeric_columns += [colname]
print(list(zip(data['OriginCityName'][0:3].values_host, encodings[0:3])))
# Remove redundant, surrogate, and unwanted columns from the data
remove_cols = set (['Year', 'Cancelled', 'DOT_ID_Reporting_Airline', 'enc_IATA_CODE_Reporting_Airline', 'ArrTime']);
output_columns = list(set(numeric_columns).difference(remove_cols))
# Add back additional columns that are used for data visualization, but not training
output_columns = output_columns + ['OriginCityName', 'DestCityName']
data = data[output_columns]
print(f'Encoded and removed extra columns, data now has {data.shape[0]} rows and {data.shape[1]} columns\n')
print(f'Removed: {remove_cols}')
print(f'Returning: {output_columns}')
return data
# run script
# Banner lines bracket main()'s output so it is easy to spot in cluster logs.
if __name__ == "__main__":
    # add space in logs
    print("\n\n")
    print("*" * 60)
    # run main function
    main()
    # add space in logs
    print("*" * 60)
    print("\n\n")
| 2.5625 | 3 |
Lab2_ALG/src/L2Q03.py | DanialAroff/WIA2005---Lab | 0 | 12771697 | <reponame>DanialAroff/WIA2005---Lab<filename>Lab2_ALG/src/L2Q03.py
# Runway Reservation System
# Reads landing-time requests from stdin until a negative value is entered,
# echoing the accumulated request list after each entry.
import time
from BinarySearchTree import BST

R = []  # accumulated landing requests (was initialized as [None] with a
        # special-cased first insert; an empty list with a plain append is
        # equivalent and simpler)
t = 0   # reference "now" time for the (not yet enabled) reservation loop
tree = BST()

print('Runway Reservation System\n')
while True:
    tw = int(input('Input landing time request: '))
    if tw < 0:
        # negative input terminates entry
        break
    R.append(tw)
    print(R)

# TODO: enable the reservation clock once BST search is wired up.
# while True:
#     if tree.search(t):
#         print('Reference time t(now): ' + str(t))
#     t = t + 1
#     time.sleep(0.5)
| 2.96875 | 3 |
src/lib/Bcfg2/Server/Plugins/Cfg/CfgCatFilter.py | amplify-education/bcfg2 | 0 | 12771698 | <gh_stars>0
import logging
import Bcfg2.Server.Plugin
from Bcfg2.Server.Plugins.Cfg import CfgFilter
logger = logging.getLogger(__name__)
class CfgCatFilter(CfgFilter):
    """Cfg filter driven by ``.cat`` files of line-diff directives.

    Each non-empty line of the filter file starts with ``+`` (append the
    remainder to the entry) or ``-`` (remove the first matching line, if
    present); anything else is ignored.
    """
    __extensions__ = ['cat']

    def modify_data(self, entry, metadata, data):
        lines = data.strip().split('\n')
        for directive in self.data.split('\n'):
            if not directive:
                continue
            payload = directive[1:]
            if directive.startswith('+'):
                lines.append(payload)
            elif directive.startswith('-') and payload in lines:
                lines.remove(payload)
        # Re-join with a guaranteed trailing newline.
        return "\n".join(lines) + "\n"
| 2.640625 | 3 |
testingCentre/apps.py | p2titus/ContactTracing | 0 | 12771699 | from django.apps import AppConfig
class TestingCentreConfig(AppConfig):
    """Django AppConfig registering the ``testingCentre`` application."""
    name = 'testingCentre'
| 1.125 | 1 |
main.py | eth-sri/ACE | 6 | 12771700 | <reponame>eth-sri/ACE
import time
import json
import numpy as np
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from args_factory import get_args
from loaders import get_loaders
from utils import (get_net, Scheduler, Statistics, count_vars, write_config, get_scaled_eps, get_layers)
from relaxed_networks import RelaxedNetwork, CombinedNetwork
from networks import UpscaleNet
from trainer import train, test, get_opt, diffAI_cert
import random
seed = 100
# Seed every RNG source (CUDA torch, CPU torch, numpy, stdlib random) up front
# so training runs are reproducible.
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
torch.set_printoptions(precision=10)
np.random.seed(seed)
random.seed(seed)
def run(args=None):
    """Train, export, test, or certify a network according to ``args.train_mode``.

    Modes: any mode containing 'train' performs training (COLT / adv /
    natural / diffAI variants), 'print' exports the net to ONNX, 'test'
    evaluates only, 'cert' runs diffAI certification and exits.

    Returns:
        ``(test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc)`` from
        the most recent evaluation; entries are ``None`` if never evaluated.
        NOTE(review): despite the ``args=None`` default, ``args`` is used
        unconditionally -- callers must always pass it.
    """
    device = 'cuda' if torch.cuda.is_available() and (not args.no_cuda) else 'cpu'
    num_train, train_loader, test_loader, input_size, input_channel, n_class = get_loaders(args)
    lossFn = nn.CrossEntropyLoss(reduction='none')
    evalFn = lambda x: torch.max(x, dim=1)[1]
    net = get_net(device, args.dataset, args.net, input_size, input_channel, n_class, load_model=args.load_model,
                  net_dim=args.cert_net_dim)#, feature_extract=args.core_feature_extract)
    # Unique output directory per run: dataset/exp/<id>/net_eps/timestamp
    timestamp = int(time.time())
    model_signature = '%s/%s/%d/%s_%.5f/%d' % (args.dataset, args.exp_name, args.exp_id, args.net, args.train_eps, timestamp)
    model_dir = args.root_dir + 'models_new/%s' % (model_signature)
    args.model_dir = model_dir
    count_vars(args, net)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    # UpscaleNet has no relaxed counterpart; otherwise build the latent-space
    # relaxation network and record the indices of its ReLU layers.
    if isinstance(net, UpscaleNet):
        relaxed_net = None
        relu_ids = None
    else:
        relaxed_net = RelaxedNetwork(net.blocks, args.n_rand_proj).to(device)
        relu_ids = relaxed_net.get_relu_ids()
    if "nat" in args.train_mode:
        cnet = CombinedNetwork(net, relaxed_net, lossFn=lossFn, evalFn=evalFn, device=device, no_r_net=True).to(device)
    else:
        dummy_input = torch.rand((1,)+net.dims[0],device=device, dtype=torch.float32)
        cnet = CombinedNetwork(net, relaxed_net, lossFn=lossFn, evalFn=evalFn, device=device, dummy_input=dummy_input).to(device)
    n_epochs, test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc = args.n_epochs, None, None, None, None
    if 'train' in args.train_mode:
        tb_writer = SummaryWriter(model_dir)
        stats = Statistics(len(train_loader), tb_writer, model_dir)
        # Persist the full argument set alongside the checkpoints.
        args_file = os.path.join(model_dir, 'args.json')
        with open(args_file, 'w') as fou:
            json.dump(vars(args), fou, indent=4)
        write_config(args, os.path.join(model_dir, 'run_config.txt'))
        eps = 0
        epoch = 0
        lr = args.lr
        n_epochs = args.n_epochs
        # Training-mode variants select the sequence of latent-space layers to
        # attack; -1 means input-space adversarial, -2 natural training.
        if "COLT" in args.train_mode:
            relu_stable = args.relu_stable
            # if args.layers is None:
            #     args.layers = [-2, -1] + relu_ids
            layers = get_layers(args.train_mode, cnet, n_attack_layers=args.n_attack_layers, protected_layers=args.protected_layers)
        elif "adv" in args.train_mode:
            relu_stable = None
            layers = [-1, -1]
            args.mix = False
        elif "natural" in args.train_mode:
            relu_stable = None
            layers = [-2, -2]
            args.nat_factor = 1
            args.mix = False
        elif "diffAI" in args.train_mode:
            relu_stable = None
            layers = [-2, -2]
        else:
            assert False, "Unknown train mode %s" % args.train_mode
        print('Saving model to:', model_dir)
        print('Training layers: ', layers)
        # One training phase per consecutive layer pair; eps and lr are
        # re-derived/decayed between phases.
        for j in range(len(layers)-1):
            opt, lr_scheduler = get_opt(cnet.net, args.opt, lr, args.lr_step, args.lr_factor, args.n_epochs,
                                        train_loader, args.lr_sched, fixup="fixup" in args.net)
            curr_layer_idx = layers[j+1]
            eps_old = eps
            eps = get_scaled_eps(args, layers, relu_ids, curr_layer_idx, j)
            # Schedulers anneal the mixing coefficient, beta, and eps over the
            # mix/anneal warm-up epochs of the phase.
            kappa_sched = Scheduler(0.0 if args.mix else 1.0, 1.0, num_train * args.mix_epochs, 0)
            beta_sched = Scheduler(args.beta_start if args.mix else args.beta_end, args.beta_end,
                                   args.train_batch * len(train_loader) * args.mix_epochs, 0)
            eps_sched = Scheduler(eps_old if args.anneal else eps, eps, num_train * args.anneal_epochs, 0)
            layer_dir = '{}/{}'.format(model_dir, curr_layer_idx)
            if not os.path.exists(layer_dir):
                os.makedirs(layer_dir)
            print('\nnew train phase: eps={:.5f}, lr={:.2e}, curr_layer={}\n'.format(eps, lr, curr_layer_idx))
            for curr_epoch in range(n_epochs):
                train(device, epoch, args, j+1, layers, cnet, eps_sched, kappa_sched, opt, train_loader,
                      lr_scheduler, relu_ids, stats, relu_stable,
                      relu_stable_protected=args.relu_stable_protected, beta_sched=beta_sched)
                if isinstance(lr_scheduler, optim.lr_scheduler.StepLR) and curr_epoch >= args.mix_epochs:
                    lr_scheduler.step()
                if (epoch + 1) % args.test_freq == 0:
                    with torch.no_grad():
                        test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc = test(device, args, cnet,
                                test_loader if args.test_set == "test" else train_loader,
                                [curr_layer_idx], stats=stats, log_ind=(epoch + 1) % n_epochs == 0)
                if (epoch + 1) % args.test_freq == 0 or (epoch + 1) % n_epochs == 0:
                    # Checkpoint both the network and optimizer state.
                    torch.save(net.state_dict(), os.path.join(layer_dir, 'net_%d.pt' % (epoch + 1)))
                    torch.save(opt.state_dict(), os.path.join(layer_dir, 'opt_%d.pt' % (epoch + 1)))
                stats.update_tb(epoch)
                epoch += 1
            # Decay the ReLU-stability weight and learning rate between phases.
            relu_stable = None if relu_stable is None else relu_stable * args.relu_stable_layer_dec
            lr = lr * args.lr_layer_dec
        if args.cert:
            with torch.no_grad():
                diffAI_cert(device, args, cnet, test_loader if args.test_set == "test" else train_loader, stats=stats,
                            log_ind=True, epoch=epoch, domains=args.cert_domain)
    elif args.train_mode == 'print':
        # Export the (un-normalized) network to ONNX; requires CUDA.
        print('printing network to:', args.out_net_file)
        dummy_input = torch.randn(1, input_channel, input_size, input_size, device='cuda')
        net.skip_norm = True
        torch.onnx.export(net, dummy_input, args.out_net_file, verbose=True)
    elif args.train_mode == 'test':
        with torch.no_grad():
            test(device, args, cnet, test_loader if args.test_set == "test" else train_loader, [-1], log_ind=True)
    elif args.train_mode == "cert":
        tb_writer = SummaryWriter(model_dir)
        stats = Statistics(len(train_loader), tb_writer, model_dir)
        args_file = os.path.join(model_dir, 'args.json')
        with open(args_file, 'w') as fou:
            json.dump(vars(args), fou, indent=4)
        write_config(args, os.path.join(model_dir, 'run_config.txt'))
        print('Saving results to:', model_dir)
        with torch.no_grad():
            diffAI_cert(device, args, cnet, test_loader if args.test_set == "test" else train_loader, stats=stats,
                        log_ind=True, domains=args.cert_domain)
        # Certification-only runs terminate here.
        exit(0)
    else:
        assert False, 'Unknown mode: {}!'.format(args.train_mode)
    return test_nat_loss, test_nat_acc, test_adv_loss, test_adv_acc
def main():
    """CLI entry point: parse the command-line arguments and dispatch to run()."""
    run(args=get_args())
# Allow the module to be executed directly as a script.
if __name__ == '__main__':
    main()
| 1.882813 | 2 |
tests/builtins/test_oct.py | Mariatta/voc | 1 | 12771701 | from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class OctTests(TranspileTestCase):
    # No oct()-specific transpile cases yet; this class exists so the suite
    # has a place to grow.
    pass
class BuiltinOctFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    # Run the shared builtin-function test matrix against oct().
    functions = ["oct"]

    # Generated cases the harness skips for oct(); exact semantics are defined
    # by BuiltinFunctionTestCase (not visible here) — confirm before editing.
    not_implemented = [
        'test_bytearray',
        'test_bytes',
        'test_class',
        'test_complex',
        'test_dict',
        'test_frozenset',
        'test_set',
    ]
| 1.65625 | 2 |
Machine-Learning-in-Production/Includes/Drift-Monitoring-Setup.py | databricks-academy/ml-in-production | 14 | 12771702 | # Databricks notebook source
import numpy as np
import pandas as pd
from scipy import stats
# COMMAND ----------
# Simulate original ice cream dataset
df = pd.DataFrame()
df['temperature'] = np.random.uniform(60, 80, 1000)
df['number_of_cones_sold'] = np.random.uniform(0, 20, 1000)
flavors = ["Vanilla"] * 300 + ['Chocolate'] * 200 + ['Cookie Dough'] * 300 + ['Coffee'] * 200
np.random.shuffle(flavors)
df['most_popular_ice_cream_flavor'] = flavors
df['number_bowls_sold'] = np.random.uniform(0, 20, 1000)
sorbet = ["Raspberry "] * 250 + ['Lemon'] * 250 + ['Lime'] * 250 + ['Orange'] * 250
np.random.shuffle(sorbet)
df['most_popular_sorbet_flavor'] = sorbet
df['total_store_sales'] = np.random.normal(100, 10, 1000)
df['total_sales_predicted'] = np.random.normal(100, 10, 1000)
# Simulate new ice cream dataset
df2 = pd.DataFrame()
df2['temperature'] = (df['temperature'] - 32) * (5/9) # F -> C
df2['number_of_cones_sold'] = np.random.uniform(0, 20, 1000) #stay same
flavors = ["Vanilla"] * 100 + ['Chocolate'] * 300 + ['Cookie Dough'] * 400 + ['Coffee'] * 200
np.random.shuffle(flavors)
df2['most_popular_ice_cream_flavor'] = flavors
df2['number_bowls_sold'] = np.random.uniform(10, 30, 1000)
sorbet = ["Raspberry "] * 200 + ['Lemon'] * 200 + ['Lime'] * 200 + ['Orange'] * 200 + [None] * 200
np.random.shuffle(sorbet)
df2['most_popular_sorbet_flavor'] = sorbet
df2['total_store_sales'] = np.random.normal(150, 10, 1000) # increased
df2['total_sales_predicted'] = np.random.normal(80, 10, 1000) # decreased
| 3.078125 | 3 |
api/accounts/base/serializers.py | klebed/esdc-ce | 97 | 12771703 | <reponame>klebed/esdc-ce<gh_stars>10-100
from api import serializers as s
from api.authtoken.serializers import AuthTokenSerializer
class APIAuthTokenSerializer(AuthTokenSerializer):
    """Auth-token serializer that additionally enforces the user's API access flag."""

    def validate(self, attrs):
        """Run the base token validation, then reject accounts without api_access."""
        attrs = super(APIAuthTokenSerializer, self).validate(attrs)
        user = attrs['user']
        if not user.api_access:
            raise s.ValidationError('User account is not allowed to access API.')
        return attrs
| 2.421875 | 2 |
ch4/magic8.py | rfreiberger/Automate-the-Boring-Stuff | 0 | 12771704 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 14 22:11:14 2016
@author: Rob
"""
import random

# Classic Magic 8-Ball responses.
messages = ['It is certain',
            'It is decidedly so',
            'Yes definitely',
            'Reply hazy try again',
            'Ask again later',
            'Concentrate and ask again',
            'My reply is no',
            'Outlook not so good',
            'Very doubtful']

# random.choice is the idiomatic (and off-by-one-proof) replacement for
# messages[random.randint(0, len(messages) - 1)].
print(random.choice(messages))
| 3.53125 | 4 |
python/src/keras/resnet.py | d-ikeda-sakurasoft/deep-learning | 0 | 12771705 | <reponame>d-ikeda-sakurasoft/deep-learning
import numpy as np
import matplotlib.pylab as plt
from keras.datasets import *
from keras.utils import *
from keras.models import *
from keras.layers import *
#データ
(x_train, y_train),(x_test, y_test) = cifar100.load_data()
labels = np.max(y_train) + 1
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = to_categorical(y_train, labels)
y_test = to_categorical(y_test, labels)
#モデル
inputs = Input(shape=x_train.shape[1:])
f = 64
ki = 'he_normal'
kr = regularizers.l2(1e-11)
x = Conv2D(filters=f, kernel_size=7, padding='same', kernel_initializer=ki, kernel_regularizer=kr)(inputs)
x = MaxPooling2D(pool_size=2)(x)
n = 5
for i in range(n):
shortcut = x
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(rate=0.3)(x)
x = Conv2D(filters=f*(2**i), kernel_size=1, padding='same', kernel_initializer=ki, kernel_regularizer=kr)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(filters=f*(2**i), kernel_size=3, padding='same', kernel_initializer=ki, kernel_regularizer=kr)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Conv2D(filters=f*(2**(i+2)), kernel_size=1, padding='same', kernel_initializer=ki, kernel_regularizer=kr)(x)
x = Concatenate()([x, shortcut])
if i != (n - 1):
x = MaxPooling2D(pool_size=2)(x)
x = GlobalAveragePooling2D()(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(rate=0.4)(x)
x = Dense(units=labels, kernel_initializer=ki, kernel_regularizer=kr)(x)
x = BatchNormalization()(x)
x = Activation('softmax')(x)
x = Dropout(rate=0.4)(x)
model = Model(inputs=inputs, outputs=x)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#訓練
epochs = 50
history = model.fit(x=x_train, y=y_train, epochs=epochs, batch_size=100, validation_split=0.2)
#グラフ
epochs = range(epochs)
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
plt.plot(epochs, history.history['acc'], label='training')
plt.plot(epochs, history.history['val_acc'], label='validation')
plt.title('acc')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, history.history['loss'], label='training')
plt.plot(epochs, history.history['val_loss'], label='validation')
plt.title('loss')
plt.legend()
plt.show()
#テスト
score = model.evaluate(x=x_test, y=y_test)
print('test_loss:', score[0])
print('test_acc:', score[1])
| 2.890625 | 3 |
src/connectionWindow.py | insoPL/QtDraughts | 0 | 12771706 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QVBoxLayout, QDialog, QLineEdit, QHBoxLayout, QPushButton, QProgressDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtCore import pyqtSignal, Qt
from network import Server, Client
import logging
import socket
class ConnectionWindow(QDialog):
    # Dialog that lets the player either host a game (server row) or join one
    # (client row). Emits got_connection once a peer connection is established.
    got_connection = pyqtSignal()

    def __init__(self):
        super().__init__()
        self.setWindowFlags(self.windowFlags() ^ Qt.WindowContextHelpButtonHint)
        # NOTE(review): the backslash in ':/graphics\internet.png' is literal
        # here (\i is not an escape sequence) but fragile — confirm the Qt
        # resource path before changing it.
        self.setWindowIcon(QIcon(':/graphics\internet.png'))
        self.setWindowTitle("Multiplayer")
        self.resize(400, 300)
        grid = QVBoxLayout()
        grid.addLayout(self.client())
        grid.addLayout(self.server())
        grid.addStretch(1)
        self.setLayout(grid)
        self.waiting_window = None  # modal progress dialog shown while connecting
        self.connection = None      # active Server/Client instance, if any

    def server(self):
        # Build the "host a game" row: ip / port / password / Host button.
        self.server_ip_address = QLineEdit(socket.gethostbyname(socket.gethostname()))
        self.server_port = QLineEdit("25565")
        self.server_port.setMaximumWidth(80)
        self.server_password = QLineEdit("password")
        host_button = QPushButton("Host")
        host_button.clicked.connect(self.host_button_clicked)
        hbox = QHBoxLayout()
        hbox.addWidget(self.server_ip_address)
        hbox.addWidget(self.server_port)
        hbox.addWidget(self.server_password)
        hbox.addWidget(host_button)
        return hbox

    def host_button_clicked(self):
        # Ignore clicks while a connection attempt is already in progress.
        if self.connection:
            return
        self.connection = Server()
        self.waiting_window = QProgressDialog("Waiting for network...", "Cancel", 0, 0)
        self.waiting_window.setWindowTitle("Waiting")
        self.waiting_window.setWindowIcon(QIcon(':/graphics\internet.png'))
        self.waiting_window.setWindowFlags(self.waiting_window.windowFlags() ^ Qt.WindowContextHelpButtonHint)
        # On success: close the progress dialog, notify listeners, close self.
        self.connection.got_connection.connect(self.waiting_window.deleteLater)
        self.connection.got_connection.connect(self.got_connection)
        self.connection.got_connection.connect(self.deleteLater)
        self.connection.connection_error.connect(self.connection_error)
        self.connection.connection_error.connect(self.waiting_window.deleteLater)
        # Cancelling the dialog tears down the listening server.
        self.waiting_window.canceled.connect(self.connection.close)
        self.connection.start(self.server_ip_address.text(), self.server_port.text(), self.server_password.text())
        self.waiting_window.exec()

    def client(self):
        # Build the "join a game" row: ip / port / password / Connect button.
        self.client_ip_address = QLineEdit(socket.gethostbyname(socket.gethostname()))
        self.client_port = QLineEdit("25565")
        self.client_port.setMaximumWidth(80)
        self.client_password = QLineEdit("password")
        connect_button = QPushButton("Connect")
        connect_button.clicked.connect(self.connect_button_clicked)
        hbox = QHBoxLayout()
        hbox.addWidget(self.client_ip_address)
        hbox.addWidget(self.client_port)
        hbox.addWidget(self.client_password)
        hbox.addWidget(connect_button)
        return hbox

    def connect_button_clicked(self):
        # Ignore clicks while a connection attempt is already in progress.
        if self.connection:
            return
        self.connection = Client()
        self.waiting_window = QProgressDialog("Waiting for server...", "Cancel", 0, 0)
        self.waiting_window.setWindowTitle("Connecting")
        self.waiting_window.setWindowFlags(self.waiting_window.windowFlags() ^ Qt.WindowContextHelpButtonHint)
        self.waiting_window.setWindowIcon(QIcon(':/graphics\internet.png'))
        # NOTE(review): asymmetry with the server path — here success uses
        # close() on the dialog and there is no canceled->close hookup; confirm
        # whether that is intentional.
        self.connection.got_connection.connect(self.waiting_window.close)
        self.connection.got_connection.connect(self.got_connection)
        self.connection.got_connection.connect(self.deleteLater)
        self.connection.connection_error.connect(self.connection_error)
        self.connection.connection_error.connect(self.waiting_window.deleteLater)
        self.connection.start(self.client_ip_address.text(), self.client_port.text(), self.client_password.text())
        self.waiting_window.exec()

    def connection_error(self, err):
        # Surface connection failures to the user in a warning box.
        QMessageBox.warning(self, 'Connection Error', " "+err+" ")
| 2.5625 | 3 |
logic/dimreduce/sax.py | CityPulse/KAT | 4 | 12771707 | from __future__ import division
__author__ = '<NAME>'
import numpy as np
from scipy.stats import norm
import string
import bottleneck as bn
import math
# paa tranformation, window = incoming data, string_length = length of outcoming data
class sax():
    """Dimensionality reduction via Symbolic Aggregate approXimation (SAX)."""

    def process(self, window, output_length, sax_vocab):
        """Reduce `window` to `output_length` points quantised to `sax_vocab` symbols,
        then map the symbol word back onto breakpoint y-coordinates."""
        word = to_sax(to_paa(normalize(window), output_length), sax_vocab)
        return vocabToCoordinates(output_length, output_length, word[0], sax_vocab)

    def getConfigurationParams(self):
        """Default configuration exposed to the framework."""
        return {"output_length": "100", "sax_vocab": "4"}
def normalize(data):
    """Z-normalise `data`: zero mean and unit (population) standard deviation."""
    centered = np.asarray(data) - np.mean(data)
    return centered / centered.std()
def to_paa(data, string_length):
    """Piecewise Aggregate Approximation: mean of each of `string_length` chunks."""
    return [chunk.mean() for chunk in np.array_split(data, string_length)]
def gen_breakpoints(symbol_count):
    """Equiprobable Gaussian breakpoints for `symbol_count` symbols, with a
    trailing +inf so the top bin is unbounded."""
    quantiles = np.linspace(1. / symbol_count, 1 - 1. / symbol_count, symbol_count - 1)
    return np.concatenate((norm.ppf(quantiles), np.array([np.inf])))
def to_sax(data, symbol_count):
    """Map each PAA mean to its SAX letter; returns a one-element list holding the word."""
    breakpoints = gen_breakpoints(symbol_count)
    letters = [string.ascii_letters[np.where(breakpoints > mean)[0][0]] for mean in data]
    return [''.join(letters)]
def vocabToCoordinates(time_window, phrase_length, phrases, symbol_count):
    """Expand a SAX word into a step curve of y-values for plotting.

    Each letter is repeated time_window/phrase_length times and mapped to the
    cut-line value of its symbol; '#' letters become NaN gaps.
    """
    breakpoints = gen_breakpoints(symbol_count)
    newCutlines = breakpoints.tolist()
    # Replace the +inf top bin with an extrapolated finite value so it can be drawn.
    max_value = breakpoints[symbol_count - 2] + ((breakpoints[symbol_count - 2] - breakpoints[symbol_count - 3]) * 2)
    # HERE IS SOMETHING WRONG // ONLY IN VISUALISATION  (original author's note, kept)
    min_value = breakpoints[0] - ((breakpoints[1] - breakpoints[0]) * 2)
    infi = newCutlines.pop()
    newCutlines.append(max_value)
    newCutlines.append(infi)
    newCutlines.insert(0, min_value)
    # Number of output samples contributed by each letter of the word.
    co1 = time_window / float(phrase_length)
    retList = []
    for s in phrases:
        # FIX: was `s is "#"` — identity comparison against a str literal only
        # works by CPython interning accident; use equality.
        if s == "#":
            for i in range(int(co1)):
                retList.append(np.nan)
        else:
            for i in range(int(co1)):
                retList.append(newCutlines[ord(s) - 97])
    return retList
def convertSaxBackToContinious(string_length, symbol_count, data):
    # NOTE(review): this function appears broken/dead. `norm` here resolves to
    # scipy.stats.norm (imported at module top), which is not callable with this
    # signature, and vocabToCoordinates takes 4 positional args but is passed 5.
    # Calling it will raise TypeError — confirm intent before use or removal.
    points, phrases = norm(data,string_length, symbol_count)
    retList = vocabToCoordinates(data, string_length, phrases, points, symbol_count)
    #print phrases[0]
    return retList
def saxDistance(w1, w2, original_length, symbol_count):
    """MINDIST between two equal-length SAX words, rescaled by the ratio of the
    original series length to the word length."""
    if len(w1) != len(w2):
        raise Exception("not equal string length")
    squared = 0
    for a, b in zip(w1, w2):
        squared += saxDistanceLetter(a, b, symbol_count)
    return np.sqrt(squared) * np.sqrt(np.divide(original_length, len(w1)))
def saxDistanceLetter(w1, w2, symbol_count):
    """Breakpoint distance between two SAX letters via the lookup table.

    Raises Exception for letters outside the first `symbol_count` lowercase letters.
    """
    n1 = ord(w1) - 97
    n2 = ord(w2) - 97
    # FIX: validate BEFORE indexing, and use the correct bound. The original
    # checked `> symbol_count` after building the table, so any letter past the
    # alphabet raised IndexError (and uppercase letters produced negative
    # indices that silently wrapped) instead of the intended error message.
    if not 0 <= n1 < symbol_count:
        raise Exception(" letter not in Dictionary " + w1)
    if not 0 <= n2 < symbol_count:
        raise Exception(" letter not in Dictionary " + w2)
    lookupTable = createLookup(symbol_count, gen_breakpoints(symbol_count))
    return lookupTable[n1][n2]
def createLookup(symbol_count, breakpoints):
    """Build the symbol_count x symbol_count letter-distance matrix."""
    return make_matrix(symbol_count, symbol_count, breakpoints)
def make_list(row, size, breakpoints):
    """One row of the SAX distance table (1-based `row`); adjacent symbols cost 0,
    others cost the gap between their nearest breakpoints."""
    values = []
    for col in range(1, size + 1):
        if abs(row - col) <= 1:
            values.append(0)
        else:
            values.append(breakpoints[max(row, col) - 2] - breakpoints[min(row, col) - 1])
    return values
def make_matrix(rows, cols, breakpoints):
    """Stack the 1-based distance rows into the full lookup matrix."""
    return [make_list(r, cols, breakpoints) for r in range(1, rows + 1)]
api/app/urls/address_url.py | LIttle-soul/LSOJ | 0 | 12771708 | from django.urls import path
from app.view.address_view import *
# Address API routes; the view classes come from app.view.address_view
# (star-imported above).
urlpatterns = [
    path('synchronizeaddress/', SynchronizeAddress.as_view(), name='SynchronizeAddress'),
    path('getprovince/', GetProvince.as_view(), name='GetProvince'),
    path('getmunicipality/', GetMunicipality.as_view(), name='GetMunicipality'),
    path('getaddresslist/', GetAddressList.as_view(), name='GetAddressList'),
    # NOTE(review): this name breaks the PascalCase pattern used above; leave
    # as-is unless all reverse() callers are updated too.
    path('getaddressmessage/', GetAddressMessage.as_view(), name='getAddressMessage'),
]
| 1.804688 | 2 |
scripts/models/covidNet.py | anudeepsekhar/Lane-Detection-Pytorch | 1 | 12771709 | <filename>scripts/models/covidNet.py<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from models.layers import denseBlock2, convBlock, unetUp, outconv
class Net(nn.Module):
    """Dense-block U-Net: a dense encoder with transition convs on the skip
    paths feeding a concatenation-based upsampling decoder."""

    def __init__(self, in_channels=3, n_classes=2, feature_scale=2, is_deconv=True, is_batchnorm=True):
        super(Net, self).__init__()
        self.is_deconv = is_deconv
        self.is_batchnorm = is_batchnorm

        # Stem convolution.
        self.lowconv = nn.Conv2d(in_channels, 32, 3, 1, 1)
        self.relu = nn.ReLU(inplace=True)

        # Encoder: dense blocks separated by 2x2 max-pooling.
        self.dense1 = denseBlock2(32, 128, 32, 3)
        self.maxpool1 = nn.MaxPool2d(2)
        self.dense2 = denseBlock2(128, 256, 32, 3)
        self.maxpool2 = nn.MaxPool2d(2)
        self.dense3 = denseBlock2(224, 352, 32, 4)
        self.maxpool3 = nn.MaxPool2d(2)
        self.dense4 = denseBlock2(352, 576, 32, 6)
        self.maxpool4 = nn.MaxPool2d(2)

        # Transition convs that shrink the skip tensors fed to the decoder.
        self.trasit1 = convBlock(128, 68)
        self.trasit2 = convBlock(224, 136)
        self.trasit3 = convBlock(352, 272)

        self.bottleneck = nn.Conv2d(544, 1088, 3, 1, 1)

        # Decoder: upsample and concatenate the matching skip at each stage.
        self.up_cat1 = unetUp(1088, 544, self.is_deconv)
        self.up_cat2 = unetUp(544, 272, self.is_deconv)
        self.up_cat3 = unetUp(272, 136, self.is_deconv)
        self.up_cat4 = unetUp(136, 68, self.is_deconv)
        self.outconv = outconv(68, n_classes)

    def forward(self, x):
        x = self.relu(self.lowconv(x))

        # Encoder path; keep the transition outputs for the skip connections.
        d1 = self.dense1(x)
        skip1 = self.trasit1(d1)
        d2 = self.dense2(self.maxpool1(d1))
        skip2 = self.trasit2(d2)
        d3 = self.dense3(self.maxpool2(d2))
        skip3 = self.trasit3(d3)
        d4 = self.dense4(self.maxpool3(d3))

        # Bottleneck and decoder path.
        up = self.bottleneck(self.maxpool4(d4))
        up = self.up_cat1(up, d4)
        up = self.up_cat2(up, skip3)
        up = self.up_cat3(up, skip2)
        up = self.up_cat4(up, skip1)
        return self.outconv(up)
class Net2(nn.Module):
    """Variant of Net without transition convs: the decoder concatenates the
    raw dense-block outputs directly (single-channel input by default)."""

    def __init__(self, in_channels=1, n_classes=2, feature_scale=2, is_deconv=True, is_batchnorm=True):
        super(Net2, self).__init__()
        self.is_deconv = is_deconv
        self.is_batchnorm = is_batchnorm

        # Stem convolution.
        self.lowconv = nn.Conv2d(in_channels, 32, 3, 1, 1)
        self.relu = nn.ReLU(inplace=True)

        # Encoder: dense blocks with doubling widths, pooled between stages.
        self.dense1 = denseBlock2(32, 64, 32, 1)
        self.maxpool1 = nn.MaxPool2d(2)
        self.dense2 = denseBlock2(64, 128, 32, 2)
        self.maxpool2 = nn.MaxPool2d(2)
        self.dense3 = denseBlock2(128, 256, 32, 4)
        self.maxpool3 = nn.MaxPool2d(2)
        self.dense4 = denseBlock2(256, 512, 32, 8)
        self.maxpool4 = nn.MaxPool2d(2)

        self.bottleneck = nn.Conv2d(512, 1024, 3, 1, 1)

        # Decoder: upsample and concatenate the matching encoder output.
        self.up_cat1 = unetUp(1024, 512, self.is_deconv)
        self.up_cat2 = unetUp(512, 256, self.is_deconv)
        self.up_cat3 = unetUp(256, 128, self.is_deconv)
        self.up_cat4 = unetUp(128, 64, self.is_deconv)
        self.outconv = outconv(64, n_classes)

    def forward(self, x):
        x = self.relu(self.lowconv(x))

        # Encoder path; the dense outputs double as the skip tensors.
        d1 = self.dense1(x)
        d2 = self.dense2(self.maxpool1(d1))
        d3 = self.dense3(self.maxpool2(d2))
        d4 = self.dense4(self.maxpool3(d3))

        # Bottleneck and decoder path.
        up = self.bottleneck(self.maxpool4(d4))
        up = self.up_cat1(up, d4)
        up = self.up_cat2(up, d3)
        up = self.up_cat3(up, d2)
        up = self.up_cat4(up, d1)
        return self.outconv(up)
class Net3(nn.Module):
    """Slimmer dense U-Net: small growth rate, 1x1 transition convs on every
    skip (including the deepest), non-deconv upsampling by default."""

    def __init__(self, in_channels=3, n_classes=1, feature_scale=2, is_deconv=False, is_batchnorm=True):
        super(Net3, self).__init__()
        self.is_deconv = is_deconv
        self.is_batchnorm = is_batchnorm

        # Stem convolution.
        self.lowconv = nn.Conv2d(in_channels, 32, 3, 1, 1)
        self.relu = nn.ReLU(inplace=True)

        # Encoder: four dense blocks with growth rate 8.
        self.dense1 = denseBlock2(32, 64, 8, 4)
        self.maxpool1 = nn.MaxPool2d(2)
        self.dense2 = denseBlock2(64, 96, 8, 4)
        self.maxpool2 = nn.MaxPool2d(2)
        self.dense3 = denseBlock2(96, 128, 8, 4)
        self.maxpool3 = nn.MaxPool2d(2)
        self.dense4 = denseBlock2(128, 160, 8, 4)
        self.maxpool4 = nn.MaxPool2d(2)

        # 1x1 transitions reshaping each skip to the decoder's channel plan.
        self.trasit1 = nn.Conv2d(64, 40, kernel_size=1, padding=0, stride=1)
        self.trasit2 = nn.Conv2d(96, 80, kernel_size=1, padding=0, stride=1)
        self.trasit3 = nn.Conv2d(128, 160, kernel_size=1, padding=0, stride=1)
        self.trasit4 = nn.Conv2d(160, 320, kernel_size=1, padding=0, stride=1)

        self.bottleneck = nn.Conv2d(160, 640, 3, 1, 1)

        # Decoder: upsample and concatenate the matching transition output.
        self.up_cat1 = unetUp(640, 320, self.is_deconv)
        self.up_cat2 = unetUp(320, 160, self.is_deconv)
        self.up_cat3 = unetUp(160, 80, self.is_deconv)
        self.up_cat4 = unetUp(80, 40, self.is_deconv)
        self.outconv = outconv(40, n_classes)

    def forward(self, x):
        x = self.relu(self.lowconv(x))

        # Encoder path with a transition conv on every skip, d4 included.
        d1 = self.dense1(x)
        skip1 = self.trasit1(d1)
        d2 = self.dense2(self.maxpool1(d1))
        skip2 = self.trasit2(d2)
        d3 = self.dense3(self.maxpool2(d2))
        skip3 = self.trasit3(d3)
        d4 = self.dense4(self.maxpool3(d3))
        skip4 = self.trasit4(d4)

        # Bottleneck and decoder path.
        up = self.bottleneck(self.maxpool4(d4))
        up = self.up_cat1(up, skip4)
        up = self.up_cat2(up, skip3)
        up = self.up_cat3(up, skip2)
        up = self.up_cat4(up, skip1)
        return self.outconv(up)
leetcode/week_race/virtual_race/divide_array_in_consecutive_nums.py | BillionsRichard/pycharmWorkspace | 0 | 12771710 | <filename>leetcode/week_race/virtual_race/divide_array_in_consecutive_nums.py
# encoding: utf-8
"""
@version: v1.0
@author: Richard
@license: Apache Licence
@contact: <EMAIL>
@site:
@software: PyCharm
@time: 2019/12/22 23:14
"""
import heapq
from collections import Counter
from typing import List
class Solution:
    def isPossibleDivide(self, nums: List[int], k: int) -> bool:
        """Return True if nums can be partitioned into groups of k consecutive ints.

        Counting approach, O(n log n): take each distinct value in ascending
        order; every copy of it must start a group, which consumes one copy of
        each of the next k consecutive values.

        The original version sorted, heapified, then called list.remove() for
        every group member — O(n*k) removes — and relied on a broad
        `except Exception` that printed the error to stdout as control flow.
        """
        if len(nums) % k != 0:
            return False
        if k == 1:
            return True
        counts = Counter(nums)
        for start in sorted(counts):
            need = counts[start]
            if need == 0:
                continue
            # Each of the `need` groups starting at `start` consumes one copy
            # of start, start+1, ..., start+k-1.
            for value in range(start, start + k):
                if counts[value] < need:
                    return False
                counts[value] -= need
        return True
if __name__ == '__main__':
    # Ad-hoc smoke checks; expected output: True, True, True, False.
    s = Solution()
    print(s.isPossibleDivide([1,2,3,3,4,4,5,6], 4))
    print(s.isPossibleDivide([3,2,1,2,3,4,3,4,5,9,10,11], 3))
    print(s.isPossibleDivide([3,3,2,2,1,1], 3))
    print(s.isPossibleDivide([1,2,3,4], 3))
modulos/thread_sync.py | ingprovencio/pyvisionapp | 0 | 12771711 | <filename>modulos/thread_sync.py
# Lint as: python3
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import socket
import sys
import time
import json
class Modulo:
    """Skeleton worker module: concrete modules override these lifecycle hooks.

    Hooks receive the module's name plus shared local/output state dicts.
    """

    def __init__(self):
        pass

    def start(self, nombre, local_data, out_data):
        # Initialise this module's error bucket in the shared output dict.
        out_data[nombre]['error'] = {}

    def work(self, nombre, local_data, out_data):
        pass

    def onError(self, nombre, local_data, out_data):
        pass

    def event(self, nombre, local, out, event, event_sync):
        pass

    def end(self, nombre, local_data, out_data):
        pass
| 2.484375 | 2 |
get_url/komand_get_url/actions/get_file/action.py | emartin-merrill-r7/insightconnect-plugins | 1 | 12771712 | import komand
from .schema import GetFileInput, GetFileOutput
# Custom imports below
from komand_get_url.util.utils import Utils
class GetFile(komand.Action):
    """Download a file by URL, with ETag/Last-Modified caching and an optional
    integrity checksum."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='get_file',
            description='Download a file by URL',
            input=GetFileInput(),
            output=GetFileOutput())

    def run(self, params={}):  # noqa — mutable default kept: platform calls run(params)
        """Fetch the URL (as a conditional request when cached validators exist)
        and return the encoded contents.

        Raises Exception when the checksum fails or nothing can be returned
        from either the network or the local cache.
        """
        utils = Utils(action=self)
        url = params.get('url')
        checksum = params.get('checksum')
        tout = params.get('timeout', 60)
        is_verify = params.get('is_verify', True)

        # Check for supported url prefix
        utils.validate_url(url)
        meta = utils.hash_url(url)
        cache_file = '/var/cache/' + meta['file']

        # Attempt to retrieve headers from past request
        headers = {}
        if komand.helper.check_cachefile(meta['metafile']):
            headers = utils.check_url_meta_file(meta)

        # Conditional download using cached ETag / Last-Modified validators.
        urlobj = komand.helper.open_url(
            url, timeout=tout, verify=is_verify,
            If_None_Match=headers.get('etag', ''),
            If_Modified_Since=headers.get('last-modified', ''))

        if urlobj:
            contents = urlobj.read()

            # Optional integrity check of file
            if checksum:
                if not komand.helper.check_hashes(contents, checksum):
                    self.logger.error('GetFile: File Checksum Failed')
                    raise Exception('GetURL Failed')

            # Write etag and last modified to cache
            utils.create_url_meta_file(meta, urlobj)

            # Write URL file contents to cache
            f = komand.helper.open_cachefile(cache_file)
            f.write(contents)
            f.close()

            # FIX: the original condition was `code >= 200 or code <= 299`,
            # which is true for every status code; only 2xx responses should
            # return the freshly downloaded body.
            if 200 <= urlobj.code <= 299:
                f = komand.helper.encode_string(contents)
                if f:
                    return {'file': f, 'status_code': urlobj.code or 200}

        # When the download fails or the file is not modified (304 path).
        if urlobj is None:
            # Attempt to return file from cache if available
            self.logger.info('GetURL: File not modified: %s', url)
            if komand.helper.check_cachefile(cache_file):
                f = komand.helper.encode_file(cache_file)
                self.logger.info('GetURL: File returned from cache: %s', cache_file)
                # NOTE(review): this branch returns the payload under 'bytes'
                # while the success path uses 'file' — confirm against the
                # GetFileOutput schema before unifying.
                return {'bytes': f, 'status_code': 200}

        # If file hasn't been returned then we fail
        self.logger.info('GetURL: Download failed for %s', url)
        raise Exception('GetURL Failed')

    def test(self, params={}):
        """Connectivity self-test used by the platform."""
        url = 'https://www.google.com'
        komand.helper.check_url(url)
        return {}
| 2.1875 | 2 |
app/recipe/tests/test_recipe_api.py | DamienPond001/recipe-app-api | 0 | 12771713 | import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
# List/create endpoint for the recipe viewset (router-generated URL name).
RECIPE_URL = reverse('recipe:recipe-list')
def image_upload_url(recipe_id):
    """Return the image-upload URL for the recipe with the given id."""
    return reverse('recipe:recipe-upload-image', args=[recipe_id])
def detail_url(recipe_id):
    """Return the detail URL for the recipe with the given id."""
    return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name="default tag"):
    """Create and return a sample Tag owned by `user`."""
    return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name="default ingredient"):
    """Create and return a sample Ingredient owned by `user`."""
    return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
    """Create and return a sample recipe; keyword params override the defaults."""
    fields = {
        'title': 'Sample Recipe',
        'time_minutes': 10,
        'price': 5.00,
    }
    fields.update(params)
    return Recipe.objects.create(user=user, **fields)
class PublicRecipeAPITests(TestCase):
    """Test unauthenticated access to the recipe API."""

    def setUp(self):
        self.client = APIClient()

    def test_auth_required(self):
        """test that auth is required"""
        response = self.client.get(RECIPE_URL)

        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeAPITest(TestCase):
    """Test authenticated recipe API endpoints (list, detail, create, update)."""

    def setUp(self):
        # Authenticated client shared by every test in this class.
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            'pestPW'
        )
        self.client.force_authenticate(self.user)

    def test_retrieve_recipes(self):
        """Test retrieving a list of recipes"""
        sample_recipe(user=self.user)
        sample_recipe(user=self.user, title="Another Recipe")

        response = self.client.get(RECIPE_URL)

        recipes = Recipe.objects.all().order_by('-id')
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, serializer.data)

    def test_recipes_limited_to_user(self):
        """test that only authed user recipes returned"""
        user_2 = get_user_model().objects.create_user(
            '<EMAIL>',
            'otherPW'
        )
        sample_recipe(user=user_2)
        sample_recipe(user=user_2, title="Another Recipe")
        sample_recipe(user=self.user)

        response = self.client.get(RECIPE_URL)

        recipes = Recipe.objects.filter(user=self.user)
        serializer = RecipeSerializer(recipes, many=True)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), 1)
        self.assertEqual(response.data, serializer.data)

    def test_view_recipe_detail(self):
        """test viewing recipe detail"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))

        url = detail_url(recipe.id)
        response = self.client.get(url)

        serialiser = RecipeDetailSerializer(recipe)
        self.assertEqual(response.data, serialiser.data)

    def test_create_basic_recipe(self):
        """Test creating recipe"""
        payload = {
            'title': 'Choco Cheese',
            'time_minutes': 10,
            'price': 20
        }
        response = self.client.post(RECIPE_URL, payload)

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=response.data['id'])
        # Every field sent in the payload must round-trip onto the model.
        for k, v in payload.items():
            self.assertEqual(v, getattr(recipe, k))

    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag_1 = sample_tag(user=self.user, name='Vegan')
        tag_2 = sample_tag(user=self.user, name="Dessert")
        payload = {
            'title': 'Cheesecake',
            'tags': [tag_1.id, tag_2.id],
            'time_minutes': 60,
            'price': 20.00
        }
        response = self.client.post(RECIPE_URL, payload)

        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=response.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag_1, tags)
        self.assertIn(tag_2, tags)

    def test_create_recipe_with_ingredients(self):
        """Test creating recipe with ingredients"""
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        payload = {
            'title': 'Test recipe with ingredients',
            'ingredients': [ingredient1.id, ingredient2.id],
            'time_minutes': 45,
            'price': 15.00
        }

        res = self.client.post(RECIPE_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)

    def test_partial_update_recipe(self):
        """Test updating a recipe with patch"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_tag = sample_tag(user=self.user, name='Curry')

        payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}
        url = detail_url(recipe.id)
        self.client.patch(url, payload)

        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        tags = recipe.tags.all()
        # PATCH replaces the tag set with the one supplied.
        self.assertEqual(len(tags), 1)
        self.assertIn(new_tag, tags)

    def test_full_update_recipe(self):
        """Test updating a recipe with put"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        payload = {
            'title': 'Spaghetti carbonara',
            'time_minutes': 25,
            'price': 5.00
        }
        url = detail_url(recipe.id)
        self.client.put(url, payload)

        recipe.refresh_from_db()
        self.assertEqual(recipe.title, payload['title'])
        self.assertEqual(recipe.time_minutes, payload['time_minutes'])
        self.assertEqual(recipe.price, payload['price'])
        tags = recipe.tags.all()
        # PUT with no 'tags' key clears the existing tags.
        self.assertEqual(len(tags), 0)
class RecipeImageUploadTests(TestCase):
    """Test uploading images to the recipe image endpoint."""

    def setUp(self):
        self.client = APIClient()
        self.user = get_user_model().objects.create_user(
            '<EMAIL>',
            'testPW'
        )
        self.client.force_authenticate(self.user)
        self.recipe = sample_recipe(user=self.user)

    def tearDown(self):
        # Remove the uploaded image file so test runs don't accumulate files.
        self.recipe.image.delete()

    def test_upload_image_to_recipe(self):
        """Test uploading an image to recipe"""
        url = image_upload_url(self.recipe.id)
        # this will create a temp file that we can write to within the
        # context of the with and will remove it once we exit
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (10, 10))
            img.save(ntf, format='JPEG')
            ntf.seek(0) # way that python reads files; resets to beginning of file
            res = self.client.post(url, {'image': ntf}, format='multipart')

        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))

    def test_upload_image_bad_request(self):
        """Test uploading an invalid image"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(url, {'image': 'notimage'}, format='multipart')

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
retrieval_rs/retrieval_rs/metric/rouge.py | microsoft/MRS | 2 | 12771714 | <filename>retrieval_rs/retrieval_rs/metric/rouge.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from retrieval_rs.metric import rouge_score as rouge_score
import sys
import os
# Make the repository root importable when this module runs outside the package.
SR_DIR = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir))
sys.path.insert(0, SR_DIR)
class Rouge:
    """Computes ROUGE-1/2/3/L scores between hypothesis and reference texts.

    Texts are split on '.' into whitespace-normalised sentences before being
    handed to the underlying ``rouge_score`` implementations.
    """

    DEFAULT_METRICS = ["rouge-1", "rouge-2", "rouge-3", "rouge-L"]
    AVAILABLE_METRICS = {
        "rouge-1": lambda hyp, ref: rouge_score.rouge_n(hyp, ref, 1),
        "rouge-2": lambda hyp, ref: rouge_score.rouge_n(hyp, ref, 2),
        "rouge-3": lambda hyp, ref: rouge_score.rouge_n(hyp, ref, 3),
        "rouge-L": lambda hyp, ref:
            rouge_score.rouge_l_summary_level(hyp, ref),
    }
    DEFAULT_STATS = ["f", "p", "r"]
    AVAILABLE_STATS = ["f", "p", "r"]

    def __init__(self, metrics=None, stats=None):
        """Select the metrics and stats to compute.

        Args:
            metrics: iterable of metric names (case-insensitive); defaults to
                all of DEFAULT_METRICS.
            stats: iterable of stat names among "f", "p", "r"; defaults to all.

        Raises:
            ValueError: if an unknown metric or stat is requested.
        """
        if metrics is not None:
            self.metrics = []
            for m in metrics:
                m = m.lower()
                # Bug fix: "rouge-L" is stored with an upper-case L in
                # AVAILABLE_METRICS, so the lower-cased spelling used to be
                # rejected with ValueError even though it is a default metric.
                if m == "rouge-l":
                    m = "rouge-L"
                if m not in Rouge.AVAILABLE_METRICS:
                    raise ValueError("Unknown metric '%s'" % m)
                self.metrics.append(m)
        else:
            self.metrics = Rouge.DEFAULT_METRICS
        if stats is not None:
            self.stats = [s.lower() for s in stats]
            for s in self.stats:
                if s not in Rouge.AVAILABLE_STATS:
                    raise ValueError("Unknown stat '%s'" % s)
        else:
            self.stats = Rouge.DEFAULT_STATS

    @staticmethod
    def _to_sentences(text):
        """Split raw text on '.' into whitespace-normalised sentences.

        Returns the original text unchanged when it contains no non-empty
        piece (e.g. a string made only of dots), preserving the original
        guard behaviour.
        """
        sentences = [" ".join(piece.split())
                     for piece in text.split(".") if len(piece) > 0]
        return sentences if sentences else text

    def get_scores(self, hyps, refs, avg=False, ignore_empty=False):
        """Calculate the ROUGE score of each pair of hyps and refs.

        Args:
            hyps: a predicted response, or a sequence of them
            refs: a golden response, or a sequence of them (same length)
            avg: if True, return one dict of averaged scores instead of a list
            ignore_empty: filter out pairs whose hypothesis is empty

        Returns:
            a list of per-pair score dicts, or a single averaged dict.
        """
        if isinstance(hyps, str):
            hyps, refs = [hyps], [refs]
        if ignore_empty:
            # Filter out hyps of 0 length (their refs are dropped with them).
            hyps_and_refs = zip(hyps, refs)
            hyps_and_refs = [_ for _ in hyps_and_refs if len(_[0]) > 0]
            hyps, refs = zip(*hyps_and_refs)
        assert (type(hyps) == type(refs))
        assert (len(hyps) == len(refs))
        if not avg:
            return self._get_scores(hyps, refs)
        return self._get_avg_scores(hyps, refs)

    def _get_scores(self, hyps, refs):
        """Return a list of {metric: {stat: value}} dicts, one per pair."""
        scores = []
        for hyp, ref in zip(hyps, refs):
            hyp = self._to_sentences(hyp)
            ref = self._to_sentences(ref)
            sen_score = {}
            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(hyp, ref)
                sen_score[m] = {s: sc[s] for s in self.stats}
            scores.append(sen_score)
        return scores

    def _get_avg_scores(self, hyps, refs):
        """Return one {metric: {stat: mean value}} dict over all pairs."""
        scores = {m: {s: 0 for s in self.stats} for m in self.metrics}
        count = 0
        for hyp, ref in zip(hyps, refs):
            hyp = self._to_sentences(hyp)
            ref = self._to_sentences(ref)
            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(hyp, ref)
                scores[m] = {s: scores[m][s] + sc[s] for s in self.stats}
            count += 1
        scores = {m: {s: scores[m][s] / count for s in scores[m]}
                  for m in scores}
        return scores
| 2.46875 | 2 |
katrain.py | Kameone/katrain | 0 | 12771715 | from kivy.config import Config # isort:skip
Config.set("input", "mouse", "mouse,multitouch_on_demand") # isort:skip # no red dots on right click
# Path to the packaged application icon.
ICON = "img/icon.png"
Config.set("kivy", "window_icon", ICON)  # isort:skip  # set icon before Window is imported
import signal
import os
import sys
import threading
import traceback
from queue import Queue
from kivy.app import App
from kivy.core.clipboard import Clipboard
from kivy.storage.jsonstore import JsonStore
from kivy.uix.popup import Popup
from core.ai import ai_move
from core.common import OUTPUT_INFO, OUTPUT_ERROR, OUTPUT_DEBUG, OUTPUT_EXTRA_DEBUG, OUTPUT_KATAGO_STDERR
from core.engine import KataGoEngine
from core.game import Game, IllegalMoveException, KaTrainSGF
from core.sgf_parser import Move, ParseError
from gui import *
class KaTrainGui(BoxLayout):
    """Top level class responsible for tying everything together.

    Owns the KataGo engine, the current Game, all popups, and a message
    queue that serialises game-mutating actions onto one worker thread.
    """

    def __init__(self, **kwargs):
        super(KaTrainGui, self).__init__(**kwargs)
        self.debug_level = 0
        self.engine = None  # KataGoEngine, created lazily in start()
        self.game = None  # current Game, created in _do_new_game()
        self.new_game_popup = None  # popups are created once and reused
        self.fileselect_popup = None
        self.config_popup = None
        self.logger = lambda message, level=OUTPUT_INFO: self.log(message, level)
        self._load_config()
        self.debug_level = self.config("debug/level", OUTPUT_INFO)
        # Both players' AI-mode spinners offer the strategies defined in config.
        self.controls.ai_mode_groups["W"].values = self.controls.ai_mode_groups["B"].values = list(self.config("ai").keys())
        self.message_queue = Queue()
        self._keyboard = Window.request_keyboard(None, self, "")
        self._keyboard.bind(on_key_down=self._on_keyboard_down)

    def log(self, message, level=OUTPUT_INFO):
        """Log a message: KataGo stderr is scanned for startup progress,
        errors go to the status bar, the rest is gated by debug level."""
        if level == OUTPUT_KATAGO_STDERR:
            if "starting" in message.lower():
                self.controls.set_status(f"KataGo engine starting...")
            if message.startswith("Tuning"):
                self.controls.set_status(f"KataGo is tuning settings for first startup, please wait." + message)
            if "ready" in message.lower():
                self.controls.set_status(f"KataGo engine ready.")
            print(f"[KG:STDERR]{message.strip()}")
        elif level == OUTPUT_ERROR:
            self.controls.set_status(f"ERROR: {message}")
            print(f"ERROR: {message}")
        elif self.debug_level >= level:
            print(message)

    def _load_config(self):
        """Load config.json (path from argv[1] or next to the executable)."""
        base_path = getattr(sys, "_MEIPASS", os.getcwd())  # for pyinstaller
        config_file = sys.argv[1] if len(sys.argv) > 1 else os.path.join(base_path, "config.json")
        try:
            self.log(f"Using config file {config_file}", OUTPUT_INFO)
            self._config_store = JsonStore(config_file, indent=4)
            self._config = dict(self._config_store)
        except Exception as e:
            self.log(f"Failed to load config {config_file}: {e}", OUTPUT_ERROR)
            sys.exit(1)

    def save_config(self):
        """Write the in-memory config back to the JsonStore on disk."""
        for k, v in self._config.items():
            self._config_store.put(k, **v)

    def config(self, setting, default=None):
        """Look up a config value; "cat/key" addresses a nested setting."""
        try:
            if "/" in setting:
                cat, key = setting.split("/")
                return self._config[cat].get(key, default)
            else:
                return self._config[setting]
        except KeyError:
            self.log(f"Missing configuration option {setting}", OUTPUT_ERROR)

    def start(self):
        """Start the engine and the message-loop thread; idempotent."""
        if self.engine:
            return
        self.board_gui.trainer_config = self.config("trainer")
        self.board_gui.ui_config = self.config("board_ui")
        self.engine = KataGoEngine(self, self.config("engine"))
        threading.Thread(target=self._message_loop_thread, daemon=True).start()
        self._do_new_game()

    def update_state(self, redraw_board=False):  # is called after every message and on receiving analyses and config changes
        """Refresh AI/undo handling, prisoner counts, engine status and board."""
        # AI and Trainer/auto-undo handlers
        cn = self.game.current_node
        auto_undo = cn.player and "undo" in self.controls.player_mode(cn.player)
        if auto_undo and cn.analysis_ready and cn.parent and cn.parent.analysis_ready and not cn.children and not self.game.ended:
            self.game.analyze_undo(cn, self.config("trainer"))  # not via message loop
        if cn.analysis_ready and "ai" in self.controls.player_mode(cn.next_player).lower() and not cn.children and not self.game.ended and not (auto_undo and cn.auto_undo is None):
            self._do_ai_move(cn)  # cn mismatch stops this if undo fired. avoid message loop here or fires repeatedly.
        # Handle prisoners and next player display
        prisoners = self.game.prisoner_count
        top, bot = self.board_controls.black_prisoners.__self__, self.board_controls.white_prisoners.__self__  # no weakref
        if self.game.next_player == "W":
            top, bot = bot, top
        self.board_controls.mid_circles_container.clear_widgets()
        self.board_controls.mid_circles_container.add_widget(bot)
        self.board_controls.mid_circles_container.add_widget(top)
        self.board_controls.black_prisoners.text = str(prisoners["W"])
        self.board_controls.white_prisoners.text = str(prisoners["B"])
        # update engine status dot (colour reflects how busy the engine is)
        if not self.engine or not self.engine.katago_process or self.engine.katago_process.poll() is not None:
            self.board_controls.engine_status_col = self.config("board_ui/engine_down_col")
        elif len(self.engine.queries) >= 4:
            self.board_controls.engine_status_col = self.config("board_ui/engine_busy_col")
        elif len(self.engine.queries) >= 2:
            self.board_controls.engine_status_col = self.config("board_ui/engine_little_busy_col")
        elif len(self.engine.queries) == 0:
            self.board_controls.engine_status_col = self.config("board_ui/engine_ready_col")
        else:
            self.board_controls.engine_status_col = self.config("board_ui/engine_almost_done_col")
        # redraw
        if redraw_board:
            Clock.schedule_once(self.board_gui.draw_board, -1)
        self.board_gui.redraw_board_contents_trigger()
        self.controls.update_evaluation()

    def _message_loop_thread(self):
        """Worker loop: dispatch queued messages to the matching _do_* method."""
        while True:
            game, msg, *args = self.message_queue.get()
            try:
                self.log(f"Message Loop Received {msg}: {args} for Game {game}", OUTPUT_EXTRA_DEBUG)
                if game != self.game.game_id:
                    # Discard messages queued for a game that has since been replaced.
                    self.log(f"Message skipped as it is outdated (current game is {self.game.game_id}", OUTPUT_EXTRA_DEBUG)
                    continue
                getattr(self, f"_do_{msg.replace('-','_')}")(*args)
                self.update_state()
            except Exception as e:
                self.log(f"Exception in processing message {msg} {args}: {e}", OUTPUT_ERROR)
                traceback.print_exc()

    def __call__(self, message, *args):
        """Queue a message for the message loop, tagged with the current game id."""
        if self.game:
            self.message_queue.put([self.game.game_id, message, *args])

    def _do_new_game(self, move_tree=None, analyze_fast=False):
        """Start a fresh Game, optionally from a loaded SGF move tree."""
        self.engine.on_new_game()  # clear queries
        self.game = Game(self, self.engine, self.config("game"), move_tree=move_tree, analyze_fast=analyze_fast)
        self.controls.select_mode("analyze" if move_tree and len(move_tree.nodes_in_tree) > 1 else "play")
        self.controls.graph.initialize_from_game(self.game.root)
        self.update_state(redraw_board=True)

    def _do_ai_move(self, node=None):
        """Let the configured AI strategy generate the next move."""
        if node is None or self.game.current_node == node:
            mode = self.controls.ai_mode(self.game.current_node.next_player)
            settings = self.config(f"ai/{mode}")
            if settings:
                ai_move(self.game, mode, settings)

    def _do_undo(self, n_times=1):
        self.game.undo(n_times)

    def _do_redo(self, n_times=1):
        self.game.redo(n_times)

    def _do_switch_branch(self, direction):
        self.game.switch_branch(direction)

    def _do_play(self, coords):
        """Play a move at coords (None = pass); illegal moves go to the status bar."""
        try:
            self.game.play(Move(coords, player=self.game.next_player))
        except IllegalMoveException as e:
            self.controls.set_status(f"Illegal Move: {str(e)}")

    def _do_analyze_extra(self, mode):
        self.game.analyze_extra(mode)

    def _do_analyze_sgf_popup(self):
        """Show the SGF file chooser (created once) and load the selected file."""
        if not self.fileselect_popup:
            self.fileselect_popup = Popup(title="Double Click SGF file to analyze", size_hint=(0.8, 0.8)).__self__
            popup_contents = LoadSGFPopup()
            self.fileselect_popup.add_widget(popup_contents)
            popup_contents.filesel.path = os.path.abspath(os.path.expanduser(self.config("sgf/sgf_load")))

            def readfile(files, _mouse):
                self.fileselect_popup.dismiss()
                try:
                    move_tree = KaTrainSGF.parse_file(files[0])
                except ParseError as e:
                    self.log(f"Failed to load SGF. Parse Error: {e}", OUTPUT_ERROR)
                    return
                self._do_new_game(move_tree=move_tree, analyze_fast=popup_contents.fast.active)
                if not popup_contents.rewind.active:
                    self.game.redo(999)

            popup_contents.filesel.on_submit = readfile
        self.fileselect_popup.open()

    def _do_new_game_popup(self):
        """Show the new-game dialog, pre-filled from the current root properties."""
        if not self.new_game_popup:
            self.new_game_popup = Popup(title="New Game", size_hint=(0.5, 0.6)).__self__
            popup_contents = NewGamePopup(self, self.new_game_popup, {k: v[0] for k, v in self.game.root.properties.items() if len(v) == 1})
            self.new_game_popup.add_widget(popup_contents)
        self.new_game_popup.open()

    def _do_config_popup(self):
        """Show the settings editor (trainer/ai categories are edited elsewhere)."""
        if not self.config_popup:
            self.config_popup = Popup(title="Edit Settings", size_hint=(0.9, 0.9)).__self__
            popup_contents = ConfigPopup(self, self.config_popup, dict(self._config), ignore_cats=("trainer", "ai"))
            self.config_popup.add_widget(popup_contents)
        self.config_popup.open()

    def _do_output_sgf(self):
        """Save the game as SGF, filling in player names if not already set."""
        for pl in Move.PLAYERS:
            if not self.game.root.get_property(f"P{pl}"):
                _, model_file = os.path.split(self.engine.config["model"])
                self.game.root.set_property(
                    f"P{pl}", f"AI {self.controls.ai_mode(pl)} (KataGo { os.path.splitext(model_file)[0]})" if "ai" in self.controls.player_mode(pl) else "Player"
                )
        msg = self.game.write_sgf(
            self.config("sgf/sgf_save"),
            trainer_config=self.config("trainer"),
            save_feedback=self.config("sgf/save_feedback"),
            eval_thresholds=self.config("trainer/eval_thresholds"),
        )
        self.log(msg, OUTPUT_INFO)
        self.controls.set_status(msg)

    def load_sgf_from_clipboard(self):
        """Parse the clipboard as SGF and start a new (fast-analyzed) game from it."""
        clipboard = Clipboard.paste()
        if not clipboard:
            self.controls.set_status(f"Ctrl-V pressed but clipboard is empty.")
            return
        try:
            move_tree = KaTrainSGF.parse(clipboard)
        except Exception as e:
            self.controls.set_status(f"Failed to imported game from clipboard: {e}\nClipboard contents: {clipboard[:50]}...")
            return
        move_tree.nodes_in_tree[-1].analyze(self.engine, analyze_fast=False)  # speed up result for looking at end of game
        self._do_new_game(move_tree=move_tree, analyze_fast=True)
        self("redo", 999)
        self.log("Imported game from clipboard.", OUTPUT_INFO)

    def on_touch_up(self, touch):
        """Mouse wheel over the board navigates the move history."""
        if self.board_gui.collide_point(*touch.pos) or self.board_controls.collide_point(*touch.pos):
            if touch.button == "scrollup":
                self("redo")
            elif touch.button == "scrolldown":
                self("undo")
        return super().on_touch_up(touch)

    def _on_keyboard_down(self, _keyboard, keycode, _text, modifiers):
        """Keyboard shortcuts; disabled while any popup is open."""
        if isinstance(App.get_running_app().root_window.children[0], Popup):
            return  # if in new game or load, don't allow keyboard shortcuts
        # Map keys either to a widget to trigger or a message tuple to queue.
        shortcuts = {
            "q": self.controls.show_children,
            "w": self.controls.eval,
            "e": self.controls.hints,
            "r": self.controls.ownership,
            "t": self.controls.policy,
            "enter": ("ai-move",),
            "a": self.controls.analyze_extra,
            "s": self.controls.analyze_equalize,
            "d": self.controls.analyze_sweep,
            "right": ("switch-branch", 1),
            "left": ("switch-branch", -1),
        }
        if keycode[1] in shortcuts.keys():
            shortcut = shortcuts[keycode[1]]
            if isinstance(shortcut, Widget):
                shortcut.trigger_action(duration=0)
            else:
                self(*shortcut)
        elif keycode[1] == "tab":
            self.controls.switch_mode()
        elif keycode[1] == "spacebar":
            self("play", None)  # pass
        elif keycode[1] in ["`", "~", "p"]:
            self.controls_box.hidden = not self.controls_box.hidden
        elif keycode[1] in ["up", "z"]:
            # shift = 10 steps, ctrl = jump to start/end
            self("undo", 1 + ("shift" in modifiers) * 9 + ("ctrl" in modifiers) * 999)
        elif keycode[1] in ["down", "x"]:
            self("redo", 1 + ("shift" in modifiers) * 9 + ("ctrl" in modifiers) * 999)
        elif keycode[1] == "n" and "ctrl" in modifiers:
            self("new-game-popup")
        elif keycode[1] == "l" and "ctrl" in modifiers:
            self("analyze-sgf-popup")
        elif keycode[1] == "s" and "ctrl" in modifiers:
            self("output-sgf")
        elif keycode[1] == "c" and "ctrl" in modifiers:
            Clipboard.copy(self.game.root.sgf())
            self.controls.set_status("Copied SGF to clipboard.")
        elif keycode[1] == "v" and "ctrl" in modifiers:
            self.load_sgf_from_clipboard()
        return True
class KaTrainApp(App):
    """Kivy application wrapper around the KaTrainGui widget tree."""

    gui = ObjectProperty(None)

    def build(self):
        """Build and return the root widget; hook window close for cleanup."""
        self.icon = ICON  # how you're supposed to set an icon
        self.gui = KaTrainGui()
        print(self.get_application_icon())
        Window.bind(on_request_close=self.on_request_close)
        return self.gui

    def on_start(self):
        self.gui.start()

    def on_request_close(self, *args):
        """Shut the KataGo engine down when the window is closed."""
        if getattr(self, "gui", None) and self.gui.engine:
            self.gui.engine.shutdown()

    def signal_handler(self, *args):
        """SIGINT handler: optionally dump all thread stacks, then exit cleanly."""
        if self.gui.debug_level >= OUTPUT_DEBUG:
            print("TRACEBACKS")
            for threadId, stack in sys._current_frames().items():
                print(f"\n# ThreadID: {threadId}")
                for filename, lineno, name, line in traceback.extract_stack(stack):
                    # Bug fix: the extracted filename was discarded and a
                    # literal "(unknown)" placeholder was printed instead.
                    print(f"\tFile: {filename}, line {lineno}, in {name}")
                    if line:
                        print(f"\t\t{line.strip()}")
        self.on_request_close()
        sys.exit(0)
if __name__ == "__main__":
    # Install a SIGINT handler so Ctrl-C shuts the engine down cleanly.
    katrain_app = KaTrainApp()
    signal.signal(signal.SIGINT, katrain_app.signal_handler)
    try:
        katrain_app.run()
    except Exception:
        # Make sure the KataGo process is terminated even on a crash.
        katrain_app.on_request_close()
        raise
app/api/cms/quota.py | yyywang/quota | 0 | 12771716 | <filename>app/api/cms/quota.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created by Wesley on 2020/3/23.
"""
from flask import current_app, jsonify
from lin import admin_required
from lin.exception import Success
from lin.redprint import Redprint
from app.libs.utils import json_paginate
from app.models.quota import Quota
from app.validators.forms import CreateQuotaForm, PaginationForm, QuotaSetCategoryForm
quota_api = Redprint('quota')
@quota_api.route('', methods=['POST'])
@admin_required
def create_quota():
    """Create a new quota entry from the validated form data (创建语录)."""
    form = CreateQuotaForm().validate_for_api()
    Quota.create(
        content=form.content.data,
        content_text=form.content.plain_text,
        category_id=form.category_id.data,
        commit=True,
    )
    return Success()
@quota_api.route('', methods=['GET'])
@admin_required
def get_quotas():
    """Return non-deleted quotas page by page, newest first (获取语录,分页返回)."""
    form = PaginationForm().validate_for_api()
    page_size = current_app.config.get('COUNT_DEFAULT')
    query = Quota.query.filter_by(delete_time=None).order_by(
        Quota._create_time.desc())
    paginate = query.paginate(form.page.data, page_size)
    return jsonify(json_paginate(paginate))
@quota_api.route('/<int:qid>')
@admin_required
def get_quota(qid):
    """Return the detail of a single non-deleted quota, or 404 (获取语录具体信息)."""
    quota = Quota.query.filter_by(delete_time=None, id=qid).first_or_404()
    return jsonify(quota)
@quota_api.route('/<int:qid>', methods=['PUT'])
@admin_required
def update_quota(qid):
    """Update the quota identified by ``qid`` from the validated form."""
    form = CreateQuotaForm().validate_for_api()
    Quota.update_quota(qid, form)
    return Success(msg='更新语录成功')
@quota_api.route('/<int:qid>', methods=['DELETE'])
@admin_required
def delete_quota(qid):
    """Remove the quota identified by ``qid``."""
    Quota.remove_quota(qid)
    return Success(msg='语录删除成功')
@quota_api.route('/<int:qid>/category/set', methods=['POST'])
@admin_required
def set_categroy(qid):
    # NOTE(review): the name keeps the original 'categroy' typo — renaming
    # would change the Flask endpoint name and could break url_for() callers.
    """Assign a category to the quota identified by ``qid`` (为短句设置分类)."""
    form = QuotaSetCategoryForm().validate_for_api()
    Quota.set_category(qid, form.category_id.data)
    return Success()
PythonDesafios/d075.py | adaatii/Python-Curso-em-Video- | 0 | 12771717 | <reponame>adaatii/Python-Curso-em-Video-
# Desenvolva um programa que leia quatro valores
# pelo teclado e guarde-os em uma tupla.
# No final, mostre:
# A) Quantas vezes apareceu o valor 9.
# B) Em que posição foi digitado o primeiro valor 3.
# C) Quais foram os números pares.
num = (int(input('Digite um numero: ')),
       int(input('Digite outro numero: ')),
       int(input('Digite mais um numero: ')),
       int(input('Digite o ultimo numero: ')))
print(f'Voce digitou os valors: {num}')
print(f'O valor 9 apareceu {num.count(9)} vezes')
if 3 in num:
    print(f'O valor 3 apareceu {num.index(3)+1}° posição')
else:
    print('O valor 3 não foi digitado.')
# Bug fix: the original loop printed "Não foram digitados numeros pares" and
# stopped as soon as it hit the FIRST odd value, even when later values were
# even. Collect the even numbers first, then decide which message to print.
pares = [n for n in num if n % 2 == 0]
if pares:
    print('Os numeros pares foram: ')
    for n in pares:
        print(n, end=' ')
    print()
else:
    print('Não foram digitados numeros pares')
bumblebee_status/modules/contrib/playerctl.py | kongr45gpen/bumblebee-status | 0 | 12771718 | <filename>bumblebee_status/modules/contrib/playerctl.py
# pylint: disable=C0111,R0903
"""Displays information about the current song in vlc, audacious, bmp, xmms2, spotify and others
Requires the following executable:
* playerctl
Parameters:
* playerctl.format: Format string (defaults to '{artist} - {title}')
Available values are: {album}, {title}, {artist}, {trackNumber}
* playerctl.layout: Comma-separated list to change order of widgets (defaults to song, previous, pause, next)
Widget names are: playerctl.song, playerctl.prev, playerctl.pause, playerctl.next
Parameters are inherited from `spotify` module, many thanks to its developers!
contributed by `smitajit <https://github.com/smitajit>`_ - many thanks!
"""
import core.module
import core.widget
import core.input
import util.cli
import util.format
import logging
class Module(core.module.Module):
    """bumblebee-status widget showing the current song and playback
    controls, backed by the ``playerctl`` command-line tool."""

    def __init__(self, config, theme):
        super(Module, self).__init__(config, theme, [])
        self.background = True

        # Widget order is configurable; defaults to prev, song, pause, next.
        self.__layout = util.format.aslist(
            self.parameter(
                "layout", "playerctl.prev, playerctl.song, playerctl.pause, playerctl.next"
            )
        )
        self.__song = ""
        self.__cmd = "playerctl "
        self.__format = self.parameter("format", "{artist} - {title}")

        # Maps each widget to either one input binding (dict) or several (list).
        widget_map = {}
        for widget_name in self.__layout:
            widget = self.add_widget(name=widget_name)
            if widget_name == "playerctl.prev":
                widget_map[widget] = {
                    "button": core.input.LEFT_MOUSE,
                    "cmd": self.__cmd + "previous",
                }
                widget.set("state", "prev")
            elif widget_name == "playerctl.pause":
                widget_map[widget] = {
                    "button": core.input.LEFT_MOUSE,
                    "cmd": self.__cmd + "play-pause",
                }
            elif widget_name == "playerctl.next":
                widget_map[widget] = {
                    "button": core.input.LEFT_MOUSE,
                    "cmd": self.__cmd + "next",
                }
                widget.set("state", "next")
            elif widget_name == "playerctl.song":
                # Click toggles play/pause, the mouse wheel skips tracks.
                widget_map[widget] = [
                    {
                        "button": core.input.LEFT_MOUSE,
                        "cmd": self.__cmd + "play-pause",
                    }, {
                        "button": core.input.WHEEL_UP,
                        "cmd": self.__cmd + "next",
                    }, {
                        "button": core.input.WHEEL_DOWN,
                        "cmd": self.__cmd + "previous",
                    }
                ]
            else:
                raise KeyError(
                    "The playerctl module does not have a {widget_name!r} widget".format(
                        widget_name=widget_name
                    )
                )

        # Bug fix: the original loop only registered dict-valued entries, so
        # the song widget's list of bindings was silently dropped and its
        # click/wheel callbacks were never registered.
        for widget, callback_options in widget_map.items():
            if isinstance(callback_options, dict):
                core.input.register(widget, **callback_options)
            else:
                for options in callback_options:
                    core.input.register(widget, **options)

    def update(self):
        """Refresh playback state and the formatted song title; on any
        playerctl failure the song text is cleared instead of crashing."""
        try:
            self.__get_song()
            playback_status = str(util.cli.execute(self.__cmd + "status")).strip()

            for widget in self.widgets():
                if widget.name == "playerctl.pause":
                    if playback_status != "":
                        if playback_status == "Playing":
                            widget.set("state", "playing")
                        else:
                            widget.set("state", "paused")
                elif widget.name == "playerctl.song":
                    widget.set("state", "song")
                    note = ""
                    if playback_status == "Playing":
                        note = "🎵 "
                    widget.full_text(note + self.__song)
        except Exception as e:
            logging.exception(e)
            self.__song = ""

    def __get_song(self):
        """Query playerctl metadata and render it through the format string."""
        album = str(util.cli.execute(self.__cmd + "metadata xesam:album")).strip()
        title = str(util.cli.execute(self.__cmd + "metadata xesam:title")).strip()
        artist = str(util.cli.execute(self.__cmd + "metadata xesam:albumArtist")).strip()
        track_number = str(util.cli.execute(self.__cmd + "metadata xesam:trackNumber")).strip()
        self.__song = self.__format.format(
            album=album,
            title=title,
            artist=artist,
            trackNumber=track_number
        )
| 2.96875 | 3 |
tasks/test/__init__.py | remarkablerocket/changelog-cli | 16 | 12771719 | <reponame>remarkablerocket/changelog-cli
"""
Run Test Suite
"""
from invoke import task
@task
def test(context):
    """Run the project's unit-test suite under coverage."""
    command = 'coverage run -m unittest discover'
    context.run(command)
| 1.390625 | 1 |
pw_tokenizer/py/pw_tokenizer/decode.py | Tiggerlaboratoriet/pigweed | 1 | 12771720 | # Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Decodes arguments and formats tokenized messages.
The decode(format_string, encoded_arguments) function provides a simple way to
format a string with encoded arguments. The FormatString class may also be used.
Missing, truncated, or otherwise corrupted arguments are handled and displayed
in the resulting string with an error message.
"""
from datetime import datetime
import re
import struct
from typing import Iterable, List, NamedTuple, Match, Sequence, Tuple
def zigzag_decode(value: int) -> int:
    """ZigZag decode function from protobuf's wire_format module.

    Maps 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ... : even inputs become
    non-negative values, odd inputs become negative values.
    """
    # XOR-ing with 0 is the identity; XOR-ing with -1 (all ones) negates
    # and subtracts one, which is exactly the odd-input branch.
    return (value >> 1) ^ -(value & 1)
class FormatSpec:
    """Represents a format specifier parsed from a printf-style string."""

    # Regular expression for finding format specifiers.
    FORMAT_SPEC = re.compile(r'%(?:(?P<flags>[+\- #0]*\d*(?:\.\d+)?)'
                             r'(?P<length>hh|h|ll|l|j|z|t|L)?'
                             r'(?P<type>[csdioxXufFeEaAgGnp])|%)')

    # Conversions to make format strings Python compatible.
    _UNSUPPORTED_LENGTH = frozenset(['hh', 'll', 'j', 'z', 't'])
    _REMAP_TYPE = {'a': 'f', 'A': 'F'}

    # Conversion specifiers by type; n is not supported.
    _SIGNED_INT = 'di'
    _UNSIGNED_INT = frozenset('oxXup')
    _FLOATING_POINT = frozenset('fFeEaAgG')

    # Arguments of float type are encoded as 4-byte little-endian floats.
    _PACKED_FLOAT = struct.Struct('<f')

    @classmethod
    def from_string(cls, format_specifier: str):
        """Creates a FormatSpec from a str with a single format specifier."""
        match = cls.FORMAT_SPEC.fullmatch(format_specifier)

        if not match:
            raise ValueError(
                '{!r} is not a valid single format specifier'.format(
                    format_specifier))

        return cls(match)

    def __init__(self, re_match: Match):
        """Constructs a FormatSpec from an re.Match object for FORMAT_SPEC."""
        self.match = re_match
        self.specifier: str = self.match.group()
        self.flags: str = self.match.group('flags') or ''
        self.length: str = self.match.group('length') or ''
        # If there is no type, the format spec is %%.
        self.type: str = self.match.group('type') or '%'

        # self.compatible is a Python-compatible version of this specifier.
        # %p prints as 0xFEEDBEEF; other specs may need length/type switched
        if self.type == 'p':
            self.compatible = '0x%08X'
        else:
            self.compatible = ''.join([
                '%', self.flags,
                '' if self.length in self._UNSUPPORTED_LENGTH else '',
                self._REMAP_TYPE.get(self.type, self.type)
            ])

    def decode(self, encoded_arg: bytes) -> 'DecodedArg':
        """Decodes the provided data according to this format specifier."""
        if self.type == '%':  # literal %
            return DecodedArg(self, (),
                              b'')  # Use () as the value for % formatting.

        if self.type == 's':  # string
            return self._decode_string(encoded_arg)

        if self.type == 'c':  # character
            return self._decode_char(encoded_arg)

        if self.type in self._SIGNED_INT:
            return self._decode_signed_integer(encoded_arg)

        if self.type in self._UNSIGNED_INT:
            return self._decode_unsigned_integer(encoded_arg)

        if self.type in self._FLOATING_POINT:
            return self._decode_float(encoded_arg)

        # Unsupported specifier (e.g. %n)
        return DecodedArg(
            self, None, b'', DecodedArg.DECODE_ERROR,
            'Unsupported conversion specifier "{}"'.format(self.type))

    def _decode_signed_integer(self, encoded: bytes) -> 'DecodedArg':
        """Decodes a signed variable-length (LEB128-style, ZigZagged) integer."""
        if not encoded:
            return DecodedArg.missing(self)

        count = 0
        result = 0
        shift = 0

        for byte in encoded:
            count += 1
            # Each byte contributes its low 7 bits; the high bit is a
            # continuation flag.
            result |= (byte & 0x7f) << shift

            if not byte & 0x80:
                return DecodedArg(self, zigzag_decode(result), encoded[:count])

            shift += 7
            if shift >= 64:
                # More than 64 bits of payload: the varint is malformed.
                break

        return DecodedArg(self, None, encoded[:count], DecodedArg.DECODE_ERROR,
                          'Unterminated variable-length integer')

    def _decode_unsigned_integer(self, encoded: bytes) -> 'DecodedArg':
        """Decodes an unsigned integer (signed varint masked to its bit width)."""
        arg = self._decode_signed_integer(encoded)
        # Since ZigZag encoding is used, unsigned integers must be masked off to
        # their original bit length.
        if arg.value is not None:
            arg.value &= (1 << self.size_bits()) - 1

        return arg

    def _decode_float(self, encoded: bytes) -> 'DecodedArg':
        """Decodes a 4-byte little-endian float."""
        if len(encoded) < 4:
            return DecodedArg.missing(self)

        return DecodedArg(self,
                          self._PACKED_FLOAT.unpack_from(encoded)[0],
                          encoded[:4])

    def _decode_string(self, encoded: bytes) -> 'DecodedArg':
        """Reads a unicode string from the encoded data."""
        if not encoded:
            return DecodedArg.missing(self)

        # The first byte holds the string length (low 7 bits) and a
        # truncation flag (high bit).
        size_and_status = encoded[0]
        status = DecodedArg.OK

        if size_and_status & 0x80:
            status |= DecodedArg.TRUNCATED
            size_and_status &= 0x7f

        raw_data = encoded[0:size_and_status + 1]
        data = raw_data[1:]

        if len(data) < size_and_status:
            # Fewer bytes present than the length prefix promised.
            status |= DecodedArg.DECODE_ERROR

        try:
            decoded = data.decode()
        except UnicodeDecodeError as err:
            # Fall back to a bytes repr (without the leading b) as the value.
            return DecodedArg(self,
                              repr(bytes(data)).lstrip('b'), raw_data,
                              status | DecodedArg.DECODE_ERROR, err)

        return DecodedArg(self, decoded, raw_data, status)

    def _decode_char(self, encoded: bytes) -> 'DecodedArg':
        """Reads an integer from the data, then converts it to a string."""
        arg = self._decode_signed_integer(encoded)

        if arg.ok():
            try:
                arg.value = chr(arg.value)
            except (OverflowError, ValueError) as err:
                # Not a valid code point; surface the error on the arg.
                arg.error = err
                arg.status |= DecodedArg.DECODE_ERROR

        return arg

    def size_bits(self) -> int:
        """Size of the argument in bits; 0 for strings."""
        if self.type == 's':
            return 0

        # TODO(hepler): 64-bit targets likely have 64-bit l, j, z, and t.
        return 64 if self.length in ['ll', 'j'] else 32

    def __str__(self) -> str:
        return self.specifier
class DecodedArg:
    """Represents a decoded argument that is ready to be formatted."""

    # Status flags for a decoded argument. These values should match the
    # DecodingStatus enum in pw_tokenizer/internal/decode.h.
    OK = 0  # decoding was successful
    MISSING = 1  # the argument was not present in the data
    TRUNCATED = 2  # the argument was truncated during encoding
    DECODE_ERROR = 4  # an error occurred while decoding the argument
    SKIPPED = 8  # argument was skipped due to a previous error

    @classmethod
    def missing(cls, specifier: FormatSpec):
        """Alternate constructor for an argument absent from the encoded data."""
        return cls(specifier, None, b'', cls.MISSING)

    def __init__(self,
                 specifier: FormatSpec,
                 value,
                 raw_data: bytes,
                 status: int = OK,
                 error=None):
        self.specifier = specifier  # FormatSpec (e.g. to represent "%0.2f")
        self.value = value  # the decoded value, or None if decoding failed
        self.raw_data = bytes(
            raw_data)  # the exact bytes used to decode this arg
        self._status = status
        self.error = error

    def ok(self) -> bool:
        """The argument was decoded without errors."""
        # TRUNCATED still counts as ok: the partial value is usable.
        return self.status == self.OK or self.status == self.TRUNCATED

    @property
    def status(self) -> int:
        return self._status

    @status.setter
    def status(self, status: int):
        # The %% specifier is always OK and should always be printed normally.
        self._status = status if self.specifier.type != '%' else self.OK

    def format(self) -> str:
        """Returns formatted version of this argument, with error handling.

        NOTE: may mutate self.status/self.error if %-formatting the value
        fails, then falls through to the error-message rendering below.
        """
        if self.status == self.TRUNCATED:
            return self.specifier.compatible % (self.value + '[...]')

        if self.ok():
            try:
                return self.specifier.compatible % self.value
            except (OverflowError, TypeError, ValueError) as err:
                self.status |= self.DECODE_ERROR
                self.error = err

        if self.status & self.SKIPPED:
            message = '{} SKIPPED'.format(self.specifier)
        elif self.status == self.MISSING:
            message = '{} MISSING'.format(self.specifier)
        elif self.status & self.DECODE_ERROR:
            message = '{} ERROR'.format(self.specifier)
        else:
            raise AssertionError('Unhandled DecodedArg status {:x}!'.format(
                self.status))

        if self.value is None or not str(self.value):
            return '<[{}]>'.format(message)

        return '<[{} ({})]>'.format(message, self.value)

    def __str__(self) -> str:
        return self.format()

    def __repr__(self) -> str:
        return f'DecodedArg({self})'
def parse_format_specifiers(format_string: str) -> Iterable[FormatSpec]:
    """Yield a FormatSpec for each printf-style specifier in the string."""
    yield from map(FormatSpec, FormatSpec.FORMAT_SPEC.finditer(format_string))
class FormattedString(NamedTuple):
    """Result of formatting a tokenized message."""
    value: str  # the formatted output string
    args: Sequence[DecodedArg]  # the decoded arguments, in order
    remaining: bytes  # encoded data left over after decoding

    def ok(self) -> bool:
        """Arg data decoded successfully and all expected args were found."""
        return all(arg.ok() for arg in self.args) and not self.remaining

    def score(self, date_removed: datetime = None) -> tuple:
        """Returns a key for sorting by how successful a decode was.

        Decoded strings are sorted by whether they

          1. decoded all bytes for all arguments without errors,
          2. decoded all data,
          3. have the fewest decoding errors,
          4. decoded the most arguments successfully, or
          5. have the most recent removal date, if they were removed.

        This must match the collision resolution logic in detokenize.cc.

        To format a list of FormattedStrings from most to least successful,
        use sort(key=FormattedString.score, reverse=True).
        """
        return (
            self.ok(),  # decoded all data and all expected args were found
            not self.remaining,  # decoded all data
            -sum(not arg.ok() for arg in self.args),  # fewest errors
            len(self.args),  # decoded the most arguments
            date_removed or datetime.max)  # most recently present
class FormatString:
    """Represents a printf-style format string."""

    def __init__(self, format_string: str):
        """Parses format specifiers in the format string."""
        self.format_string = format_string
        self.specifiers = tuple(parse_format_specifiers(self.format_string))

        # List of non-specifier string pieces with room for formatted arguments.
        self._segments = self._parse_string_segments()

    def _parse_string_segments(self) -> List:
        """Splits the format string by format specifiers.

        Returns a list alternating literal pieces (even indices) with None
        placeholders (odd indices) that format() later fills in.
        """
        if not self.specifiers:
            return [self.format_string]

        spec_spans = [spec.match.span() for spec in self.specifiers]

        # Start with the part of the format string up to the first specifier.
        string_pieces = [self.format_string[:spec_spans[0][0]]]

        # Add the literal text between each pair of adjacent specifiers.
        for ((_, end1), (start2, _)) in zip(spec_spans[:-1], spec_spans[1:]):
            string_pieces.append(self.format_string[end1:start2])

        # Append the format string segment after the last format specifier.
        string_pieces.append(self.format_string[spec_spans[-1][1]:])

        # Make a list with spots for the replacements between the string pieces.
        segments: List = [None] * (len(string_pieces) + len(self.specifiers))
        segments[::2] = string_pieces

        return segments

    def decode(self, encoded: bytes) -> Tuple[Sequence[DecodedArg], bytes]:
        """Decodes arguments according to the format string.

        Args:
          encoded: bytes; the encoded arguments

        Returns:
          tuple with the decoded arguments and any unparsed data
        """
        decoded_args = []

        fatal_error = False
        index = 0

        for spec in self.specifiers:
            arg = spec.decode(encoded[index:])

            if fatal_error:
                # After an error is encountered, continue to attempt to parse
                # arguments, but mark them all as SKIPPED. If an error occurs,
                # it's impossible to know if subsequent arguments are valid.
                arg.status |= DecodedArg.SKIPPED
            elif not arg.ok():
                fatal_error = True

            decoded_args.append(arg)
            index += len(arg.raw_data)

        return tuple(decoded_args), encoded[index:]

    def format(self,
               encoded_args: bytes,
               show_errors: bool = False) -> FormattedString:
        """Decodes arguments and formats the string with them.

        NOTE: fills in self._segments in place, so concurrent calls on the
        same FormatString instance are not safe.

        Args:
          encoded_args: the arguments to decode and format the string with
          show_errors: if True, an error message is used in place of the %
              conversion specifier when an argument fails to decode

        Returns:
          tuple with the formatted string, decoded arguments, and remaining data
        """
        # Insert formatted arguments in place of each format specifier.
        args, remaining = self.decode(encoded_args)

        if show_errors:
            self._segments[1::2] = (arg.format() for arg in args)
        else:
            # On failure, leave the original %-specifier text in the output.
            self._segments[1::2] = (arg.format()
                                    if arg.ok() else arg.specifier.specifier
                                    for arg in args)

        return FormattedString(''.join(self._segments), args, remaining)
def decode(format_string: str,
           encoded_arguments: bytes,
           show_errors: bool = False) -> str:
    """Decodes arguments and formats them with the provided format string.

    Args:
        format_string: the printf-style format string
        encoded_arguments: encoded arguments with which to format
            format_string; must exclude the 4-byte string token
        show_errors: if True, an error message is used in place of the %
            conversion specifier when an argument fails to decode

    Returns:
        the printf-style formatted string
    """
    parsed = FormatString(format_string)
    formatted = parsed.format(encoded_arguments, show_errors)
    return formatted.value
| 2.234375 | 2 |
mmtfPyspark/tests/datasets/test_dbPtmDataset.py | sbliven/mmtf-pyspark | 59 | 12771721 | #!/usr/bin/env python
import unittest
from pyspark.sql import SparkSession
from mmtfPyspark.datasets import dbPtmDataset as pm
from mmtfPyspark.datasets.dbPtmDataset import PtmType
class DbPtmDatasetTest(unittest.TestCase):
    """Smoke tests for the dbPtmDataset helpers.

    NOTE(review): these download live PTM data through Spark, so they are
    network-dependent integration tests rather than unit tests.
    """
    def setUp(self):
        # One local-mode Spark session per test; released in tearDown().
        self.spark = SparkSession.builder.master("local[*]") \
            .appName("DbPtmDatasetTest") \
            .getOrCreate()
    def test1(self):
        # Download a single PTM subset and sanity-check the row count.
        ds = pm.download_ptm_dataset(PtmType.S_LINKEDGLYCOSYLATION)
        self.assertGreater(ds.count(), 4)
    def test2(self):
        # Fetch the full PTM dataset and sanity-check the row count.
        ds = pm.get_ptm_dataset()
        self.assertGreater(ds.count(), 900000)
    def tearDown(self):
        # Stop the session so repeated runs don't leak Spark contexts.
        self.spark.stop()
# Allow running this test module directly: python test_dbPtmDataset.py
if __name__ == '__main__':
    unittest.main()
| 2.40625 | 2 |
"""Public entry points for the simulated-annealing metaheuristics."""
from ._base_simulated_annealing._base_simulated_annealing import SimulatedAnnealing
from ._base_ensemble_simulated_annealing import EnsembleSimulatedAnnealing

# BUG FIX: the exported name must match the imported class exactly. The
# previous value 'SimmulatedAnnealing' (double 'm') does not exist in this
# module, so `from ... import *` raised AttributeError.
__all__ = ["SimulatedAnnealing", "EnsembleSimulatedAnnealing"]
| 0.976563 | 1 |
server/server/api/__init__.py | benetech/Winnow2.0 | 26 | 12771723 | # Disable flake8 issue F401 as we need these imports to configure api
# but not going to re-export them from the __init__
from . import ( # noqa: F401
scenes,
matches,
files,
errors,
videos,
cluster,
tasks,
socket,
templates,
examples,
template_matches,
file_filter_presets,
)
from .blueprint import api
# Explicitly re-export the blueprint as this package's public API so that
# pyflakes does not flag the `from .blueprint import api` import as unused.
# See discussion in https://bugs.launchpad.net/pyflakes/+bug/1178905
__all__ = ["api"]
| 1.296875 | 1 |
spintorch/exch.py | a-papp/SpinTorch | 1 | 12771724 | <gh_stars>1-10
"""Class to calculate the exchange field"""
from torch import nn, tensor
class Exchange(nn.Module):
    """Computes the micromagnetic exchange field via a discrete Laplacian."""

    A_exch = 3.65e-12  # exchange coefficient (J/m)

    def __init__(self, d: tuple):
        """d: grid cell sizes (dx, dy) used to scale the Laplacian kernel."""
        super().__init__()
        inv_dx2 = 1.0 / d[0] ** 2
        inv_dy2 = 1.0 / d[1] ** 2
        # Depthwise 3x3 convolution acting as a discrete Laplacian on each of
        # the three magnetization components; replicate padding mirrors the
        # boundary values so edge cells see a zero normal derivative.
        laplacian = nn.Conv2d(3, 3, 3, groups=3, padding=1,
                              padding_mode='replicate', bias=False)
        laplacian.weight.requires_grad = False
        laplacian.weight[:, ] = tensor([[[0.0, inv_dx2, 0.0],
                                         [inv_dy2, -2 * (inv_dx2 + inv_dy2), inv_dy2],
                                         [0.0, inv_dx2, 0.0]]])
        self.LAPLACE = laplacian

    def forward(self, m, Msat):
        """
        Calculate the exchange field of magnetization m.

        Inputs:  m     normalized magnetization (pytorch tensor)
                 Msat  saturation magnetization (pytorch tensor)
        Outputs: exchange field (same size as m)
        """
        field = 2 * self.A_exch / Msat * self.LAPLACE(m)
        # Cells with Msat == 0 produced inf/nan above; force them to zero.
        field[:, :, Msat == 0] = 0
        return field
| 2.8125 | 3 |
src/lambda_process_doc.py | maximillianus/smart-document-check | 0 | 12771725 | <gh_stars>0
import json
import urllib.parse
import boto3
import base64
from pprint import pprint
import re
# Module-level setup runs once per Lambda container (cold start); the AWS
# clients below are reused across warm invocations.
print('Loading function')
textractClient = boto3.client('textract')
s3 = boto3.client('s3')
# Canonical document-type labels emitted in parsed responses.
DOCUMENT_TYPE_ENUM = {
    'passport': 'PASSPORT',
    'identity_card': 'IDENTITY_CARD',
    'driving_license': 'DRIVER_LICENSE'
}
# Two-letter country code -> display name used in responses.
COUNTRY_ENUM = {
    'ID': 'INDONESIA',
    'PH': 'PHILIPPINES',
    'TH': 'THAILAND',
    'VN': 'VIETNAM',
    'MY': 'MALAYSIA',
}
# Sample driver's-license response payload.
# NOTE(review): unreferenced in the visible module; the values are anonymized
# placeholders and several are mismatched (e.g. BLOOD_TYPE holds a date) —
# sample data only, do not treat as a schema of real values.
DRIVER_LICENSE_RESP = {
    'country': '',
    'documentType': 'DRIVER_LICENSE',
    'extract_response':{
        'LASTNAME': '<NAME>',
        'FIRSTNAME': '<NAME>',
        'MIDDLENAME': 'TEST MIDDLE',
        'DATE_OF_BIRTH': '11/27/1985',
        'ADDRESS': 'TEST LAST',
        'LICENSE_NUMBER': '124214-6456-675',
        'EXPIRATION_DATE': '11/27/2025',
        'AGENCY_CODE': '0TH',
        'RESTRICTIONS': 'TEST LAST',
        'CONDITIONS': '124214-6456-675',
        'BLOOD_TYPE': '11/27/2025',
        'EYES_COLOR': '0TH',
        'SEX': '0TH',
        'HEIGHT_METER': 'TEST LAST',
        'WEIGHT_KG': '124214-6456-675'
    }
}
# Empty driver's-license template returned by check_document_type().
# NOTE(review): SEX defaults to '0TH', which looks like a placeholder rather
# than a meaningful value — confirm with the payload consumers.
DRIVER_LICENSE = {
    'LASTNAME': '',
    'FIRSTNAME': '',
    'MIDDLENAME': '',
    'DATE_OF_BIRTH': '',
    'ADDRESS': '',
    'LICENSE_NUMBER': '',
    'EXPIRATION_DATE': '',
    'AGENCY_CODE': '',
    'RESTRICTIONS': '',
    'CONDITIONS': '',
    'BLOOD_TYPE': '',
    'EYES_COLOR': '',
    'SEX': '0TH',
    'HEIGHT_METER': '',
    'WEIGHT_KG': ''
}
# Sample identity-card response payload (placeholder values only).
# FIX: the expiry key was renamed from 'EXPIRATION DATE' (with a space) to
# 'EXPIRATION_DATE' so the templates use the same key produced by
# identity_parser(); the old spelling made the two payload shapes disagree.
IDENTITY_CARD_RESP = {
    'country': '',
    'documentType': 'IDENTITY_CARD',
    'extract_response': {
        'LASTNAME': '',
        'FIRSTNAME': '',
        'MIDDLENAME': '',
        'DATE_OF_BIRTH': '11/27/1985',
        'PLACE_OF_BIRTH': '11/27/1985',
        'ADDRESS': 'TEST LAST',
        'IDENTITY_NUMBER': '124214-6456-675',
        'EXPIRATION_DATE': '11/27/2025',
        'SEX': '0TH',
        'NATIONALITY': '',
        'OCCUPATION': ''
    }
}
# Empty identity-card template returned by check_document_type().
IDENTITY_CARD = {
    'LASTNAME': '',
    'FIRSTNAME': '',
    'MIDDLENAME': '',
    'DATE_OF_BIRTH': '',
    'PLACE_OF_BIRTH': '',
    'ADDRESS': '',
    'IDENTITY_NUMBER': '',
    'EXPIRATION_DATE': '',
    'SEX': '',
    'NATIONALITY': '',
    'OCCUPATION': ''
}
# Sample passport response payload.
# NOTE(review): unreferenced in the visible module; values are placeholders.
PASSPORT_RESP = {
    'country': '',
    'documentType': 'PASSPORT',
    'extract_response':{
        'PASSPORT_NUMBER': '',
        'FULLNAME': '',
        'NATIONALITY': '',
        'DATE_OF_BIRTH': '11/27/1985',
        'PLACE_OF_BIRTH': '11/27/1985',
        'SEX': '0TH',
        'HEIGHT': '0TH',
        'DATE_OF_ISSUE': '2021-10-10',
        'DATE_OF_EXPIRY': '2025-10-10'
    }
}
# Empty passport template returned by check_document_type().
PASSPORT = {
    'FULLNAME': '',
    'NATIONALITY': '',
    'DATE_OF_BIRTH': '',
    'PLACE_OF_BIRTH': '',
    'SEX': '',
    'HEIGHT': '',
    'DATE_OF_ISSUE': '',
    'DATE_OF_EXPIRY': ''
}
def textract_analyze_form_s3(client, bucket, document):
    """Run Textract FORMS analysis on an S3 object.

    Returns the raw Textract response, or {} if the call fails for any
    reason (the error is logged and swallowed on purpose: best effort).
    """
    try:
        return client.analyze_document(
            Document={'S3Object': {'Bucket': bucket, 'Name': document}},
            FeatureTypes=['FORMS'],
        )
    except Exception as e:
        print('Error processing form analysis:', e)
        return {}
def textract_detect_text_s3(client, bucket, document):
    """Run Textract plain text detection on an S3 object.

    Returns the raw Textract response, or {} on any failure (logged and
    swallowed on purpose: best effort).
    """
    try:
        return client.detect_document_text(
            Document={'S3Object': {'Bucket': bucket, 'Name': document}},
        )
    except Exception as e:
        print('Error processing text detection:', e)
        return {}
def textract_analyze_form_docs(client, image):
    """Run Textract FORMS analysis on a base64-encoded image.

    Returns the raw Textract response, or {} on any failure (logged and
    swallowed on purpose: best effort).
    """
    try:
        return client.analyze_document(
            Document={'Bytes': base64.b64decode(image)},
            FeatureTypes=['FORMS'],
        )
    except Exception as e:
        print('Error processing form analysis:', e)
        return {}
def textract_detect_text_docs(client, image):
    """Run Textract plain text detection on a base64-encoded image.

    Returns the raw Textract response, or {} on any failure (logged and
    swallowed on purpose: best effort).
    """
    try:
        return client.detect_document_text(
            Document={'Bytes': base64.b64decode(image)},
        )
    except Exception as e:
        print('Error processing text detection:', e)
        return {}
def textract_process_s3(client, bucket, document):
    """Dispatch S3-based extraction; currently uses plain text detection.

    Returns the Textract response or {} on failure.
    """
    try:
        # Alternative mode: textract_analyze_form_s3(client, bucket, document)
        return textract_detect_text_s3(client, bucket, document)
    except Exception as e:
        print('Error processing textract-s3')
        print('Error:', e)
        return {}
def textract_process_docs(client, image):
    """Dispatch base64-image extraction; currently uses FORMS analysis.

    Returns the Textract response or {} on failure.
    """
    try:
        # Alternative mode: textract_detect_text_docs(client, image)
        return textract_analyze_form_docs(client, image)
    except Exception as e:
        print('Error processing textract-s3')
        print('Error:', e)
        return {}
def get_lines_from_textract(textract_resp):
    """Return the Text of every LINE block, in response order."""
    return [block['Text']
            for block in textract_resp['Blocks']
            if block['BlockType'] == 'LINE']
def get_words_from_textract(textract_resp):
    """Return the Text of every WORD block, in response order.

    Args:
        textract_resp: raw Textract response containing a 'Blocks' list.
    """
    # Idiomatic replacement for the previous append loop; also drops the
    # commented-out debug prints.
    return [block['Text']
            for block in textract_resp['Blocks']
            if block['BlockType'] == 'WORD']
def parse_key_value(textract_response):
    """Extract ordered key/value pairs from a Textract FORMS response."""
    keys, values, blocks, _line_blocks = get_kv_map(textract_response)
    return get_kv_relationship(keys, values, blocks)
def get_kv_map(response):
    """Index the blocks of a Textract response.

    Returns:
        (key_map, value_map, block_map, line_blocks): KEY_VALUE_SET blocks
        split into keys and values by entity type, a map of every block id
        to its block, and the LINE blocks in response order.
    """
    key_map = {}
    value_map = {}
    block_map = {}
    line_blocks = []
    for block in response['Blocks']:
        block_map[block['Id']] = block
        kind = block['BlockType']
        if kind == 'KEY_VALUE_SET':
            target = key_map if 'KEY' in block['EntityTypes'] else value_map
            target[block['Id']] = block
        elif kind == 'LINE':
            line_blocks.append(block)
    return key_map, value_map, block_map, line_blocks
def get_kv_relationship(key_map, value_map, block_map):
    """Pair each KEY block with its VALUE block and order by page position.

    Returns:
        dict keyed by the key's text, each value holding the value text, the
        key block's confidence, and its rounded top/left offsets; entries are
        ordered top-to-bottom then left-to-right. Duplicate key texts keep
        only the last occurrence (dict semantics, unchanged from before).

    Cleanup over the previous revision: removed the unused `position` list,
    the dead `val_dict`/`ordered_kvs` initializations, and a commented-out
    alternative sort.
    """
    kvs = {}
    for key_block in key_map.values():
        value_block = find_value_block(key_block, value_map)
        key = get_text(key_block, block_map)
        box = key_block['Geometry']['BoundingBox']
        kvs[key] = {
            'values': get_text(value_block, block_map),
            'confidence': key_block['Confidence'],
            # Rounded to 2 decimals so entries on the same visual row compare
            # equal when sorting.
            'top': round(box['Top'], 2),
            'left': round(box['Left'], 2),
        }
    ordered_keys = sorted(kvs, key=lambda k: (kvs[k]['top'], kvs[k]['left']))
    return {k: kvs[k] for k in ordered_keys}
def find_value_block(key_block, value_map):
    """Return the VALUE block linked to key_block (first linked id wins).

    Returns None when the key block has no VALUE relationship.
    """
    for rel in key_block['Relationships']:
        if rel['Type'] != 'VALUE':
            continue
        for value_id in rel['Ids']:
            return value_map[value_id]
def get_text(result, blocks_map):
    """Concatenate the child WORD texts of a block.

    Each word is followed by a single trailing space; a SELECTED checkbox
    contributes 'X '. Returns '' when the block has no relationships.
    """
    if 'Relationships' not in result:
        return ''
    pieces = []
    for rel in result['Relationships']:
        if rel['Type'] != 'CHILD':
            continue
        for child_id in rel['Ids']:
            child = blocks_map[child_id]
            if child['BlockType'] == 'WORD':
                pieces.append(child['Text'] + ' ')
            if child['BlockType'] == 'SELECTION_ELEMENT' and child['SelectionStatus'] == 'SELECTED':
                pieces.append('X ')
    return ''.join(pieces)
def get_position(result, blocks_map):
    """Concatenate the child WORD texts of a block.

    NOTE(review): despite its name, this is an exact copy of get_text() and
    never reads any geometry/position data; it is also unreferenced in the
    visible module — candidate for removal or delegation to get_text().
    """
    text = ''
    if 'Relationships' in result:
        for relationship in result['Relationships']:
            if relationship['Type'] == 'CHILD':
                for child_id in relationship['Ids']:
                    word = blocks_map[child_id]
                    if word['BlockType'] == 'WORD':
                        text += word['Text'] + ' '
                    if word['BlockType'] == 'SELECTION_ELEMENT':
                        if word['SelectionStatus'] == 'SELECTED':
                            text += 'X '
    return text
def parse_lines(textract_response):
    """Return the positionally-indexed, padded list of per-row text lines.

    The rows come from build_per_lines_text(), which pads the list so the
    country-specific parsers can index it by fixed positions.
    """
    _keys, _values, _blocks, line_blocks = get_kv_map(textract_response)
    return build_per_lines_text(get_lines(line_blocks))
def get_lines(linesblock_list):
    """Summarize LINE blocks and sort them top-to-bottom, left-to-right.

    Args:
        linesblock_list: Textract LINE blocks.

    Returns:
        list of dicts with line_id / line_text / line_top / line_left /
        line_confidence, sorted by the rounded (top, left) position.

    Cleanup over the previous revision: removed the `linetext_list`
    accumulator, which was populated but never used.
    """
    summaries = []
    for block in linesblock_list:
        summaries.append({
            'line_id': block['Id'],
            'line_text': block['Text'],
            # Rounded to 2 decimals so words on the same visual row compare
            # equal when grouping downstream.
            'line_top': round(block['Geometry']['BoundingBox']['Top'], 2),
            'line_left': round(block['Geometry']['BoundingBox']['Left'], 2),
            'line_confidence': block['Confidence'],
        })
    return sorted(summaries, key=lambda d: (d['line_top'], d['line_left']))
def build_per_lines_text(linelist):
    """Merge position-sorted line summaries into one text entry per visual row.

    Rows are grouped by exact equality of the rounded 'line_top' value, and
    the result is padded with empty strings so downstream parsers can index
    it positionally.

    NOTE(review): the accumulated text is flushed only when the row changes,
    so the final row is never appended; the [1:] slice drops the '' flushed
    for the first row (or the first row's own text if its rounded top is
    exactly 0); and rows merged via `text = item['line_text']` then
    `text += ... + ' '` lack a separator between their first two tokens.
    The positional parsers below appear calibrated to this exact output, so
    the behavior is deliberately left unchanged — verify against real
    documents before fixing.
    """
    text = ''
    line_text = []
    current_top = 0
    for item in linelist:
        top = item['line_top']
        if top == current_top:
            text += item['line_text'] + ' '
        else:
            line_text.append(text)
            text = item['line_text']
            current_top = top
    # fill with empty string:
    line_text = line_text + [''] * (30 - len(line_text))
    return line_text[1:]
def parse_passport_id(textract_list, country):
    """Find the passport number among extracted text tokens.

    Args:
        textract_list: candidate text tokens (lines or words) from Textract.
        country: two-letter country code ('ID', 'MY', 'PH', 'TH', 'VN').

    Returns:
        The first token matching the country's passport-number pattern, or
        '' when nothing matches or the country is unknown.

    Fixes over the previous revision:
      * a trailing comma turned the result into a 1-tuple instead of a str;
      * an unknown country left the match list unbound and raised NameError;
      * raw strings avoid invalid escape-sequence warnings for \\w, \\s, \\d.
    """
    patterns = {
        'ID': re.compile(r'^\w{1,2}\s?\d{1,}$'),
        'MY': re.compile(r'^A\s?\d{8}$'),
        'PH': re.compile(r'^\w{1,2}\d{6,7}\w{0,1}$'),
        'TH': re.compile(r'^\w{1,2}\d{6,7}\w{0,1}$'),
        'VN': re.compile(r'^\w{1,2}\d{6,7}\w{0,1}$'),
    }
    pattern = patterns.get(country)
    if pattern is None:
        return ''
    matches = list(filter(pattern.match, textract_list))
    return matches[0] if matches else ''
def parse_identity_id(textract_list, country):
    """Find the national identity number among extracted text tokens.

    Args:
        textract_list: candidate text tokens from Textract.
        country: two-letter country code ('ID', 'MY', 'PH', 'TH', 'VN').

    Returns:
        The first token matching the country's ID pattern, or '' when
        nothing matches or the country is unknown.

    Fixes over the previous revision:
      * a trailing comma turned the result into a 1-tuple instead of a str;
      * an unknown country left the match list unbound and raised NameError;
      * raw strings avoid invalid escape-sequence warnings.
    """
    patterns = {
        'ID': re.compile(r'^\:?\s{0,2}\d{16}$'),
        'MY': re.compile(r'^\d{6}\-\d{2}\-\d{4}$'),
        # TODO: the PH pattern is empty, so it matches every token and the
        # function simply returns the first one — define a real pattern.
        'PH': re.compile(r''),
        'TH': re.compile(r'^\d{1}\s\d{4}\s\d{5}\s\d{2}\s\d{1}$'),
        'VN': re.compile(r'^[Ss]?[Oo]?\/?[Nn]?[Oo]?\:?\s{0,2}\d{9}$'),
    }
    pattern = patterns.get(country)
    if pattern is None:
        return ''
    matches = list(filter(pattern.match, textract_list))
    return matches[0] if matches else ''
def parse_driver_id(textract_list, country):
    """Find the driving-license number among extracted text tokens.

    Args:
        textract_list: candidate text tokens from Textract.
        country: two-letter country code ('ID', 'MY', 'PH', 'TH', 'VN').

    Returns:
        The first token matching the country's license pattern, or '' when
        nothing matches or the country is unknown.

    Fixes over the previous revision:
      * a trailing comma turned the result into a 1-tuple instead of a str;
      * an unknown country left the match list unbound and raised NameError;
      * raw strings avoid invalid escape-sequence warnings.
    """
    patterns = {
        # NOTE(review): '|' splits the whole pattern here, so the '^\:?'
        # prefix only applies to the first alternative — kept as-is to
        # preserve the existing matching behavior.
        'ID': re.compile(r'^\:?\s{0,3}(\d{4}-\d{4}-\d{6})|(\d{12})$'),
        'MY': re.compile(r'^\:?\s{0,3}\d{12}$'),
        'PH': re.compile(r'^\:?\s{0,3}\w\d{2}\-\d{2}-\d{6}$'),
        'TH': re.compile(r'^\w{1,2}\d{6,7}\w{0,1}$'),
        'VN': re.compile(r'^\:?\s{0,3}\d{12}$'),
    }
    pattern = patterns.get(country)
    if pattern is None:
        return ''
    matches = list(filter(pattern.match, textract_list))
    return matches[0] if matches else ''
def identity_parser(kvs, country):
    """Map positional text lines to national-ID fields for one country.

    Args:
        kvs: the padded line list produced by build_per_lines_text(); field
            positions below are calibrated against that exact layout.
        country: two-letter country code; unknown codes yield an all-empty
            template.
    """
    def line(i):
        # Element i, with the same length guard the original fields used.
        return kvs[i] if len(kvs) > i else ''

    fields = {key: '' for key in (
        'LASTNAME', 'FIRSTNAME', 'MIDDLENAME', 'DATE_OF_BIRTH',
        'PLACE_OF_BIRTH', 'ADDRESS', 'IDENTITY_NUMBER', 'EXPIRATION_DATE',
        'SEX', 'NATIONALITY', 'OCCUPATION')}
    if country == 'ID':
        fields.update({
            'FIRSTNAME': line(3),
            'MIDDLENAME': line(3),
            'DATE_OF_BIRTH': line(4),
            'PLACE_OF_BIRTH': line(4),
            'ADDRESS': ' '.join(kvs[6:9]) if len(kvs) > 9 else '',
            'IDENTITY_NUMBER': line(3),
            'EXPIRATION_DATE': line(14),
            'SEX': line(5),
            'NATIONALITY': line(15),
            'OCCUPATION': line(13),
        })
    elif country == 'MY':
        fields.update({
            'FIRSTNAME': line(4),
            'MIDDLENAME': line(4),
            'LASTNAME': line(4),
            'ADDRESS': ' '.join([kvs[i] for i in [5, 6, 8]]) if len(kvs) > 9 else '',
            'IDENTITY_NUMBER': line(3),
            'EXPIRATION_DATE': line(14),
            'SEX': line(9),
            'NATIONALITY': line(7),
            'OCCUPATION': line(13),
        })
    elif country == 'PH':
        fields.update({
            'FIRSTNAME': line(8),
            'MIDDLENAME': line(11),
            'LASTNAME': line(6),
            'DATE_OF_BIRTH': line(15),
            'PLACE_OF_BIRTH': line(17),
            'ADDRESS': ' '.join([kvs[i] for i in [20, 22]]) if len(kvs) > 22 else '',
            'IDENTITY_NUMBER': line(4),
            'SEX': line(13),
        })
    elif country == 'TH':
        fields.update({
            'FIRSTNAME': line(5),
            'LASTNAME': line(6),
            'DATE_OF_BIRTH': line(8),
            'IDENTITY_NUMBER': line(1),
            'EXPIRATION_DATE': line(19),
        })
    elif country == 'VN':
        fields.update({
            'FIRSTNAME': line(9),
            'LASTNAME': line(9),
            'DATE_OF_BIRTH': line(11),
            'PLACE_OF_BIRTH': line(14),
            'ADDRESS': line(16),
            'IDENTITY_NUMBER': line(7),
            'EXPIRATION_DATE': line(17),
            'SEX': line(12),
            'NATIONALITY': line(12),
        })
    return fields
def driver_parser(kvs, country):
    """Map positional text lines to driver's-license fields for one country.

    Args:
        kvs: the padded line list produced by build_per_lines_text(); field
            positions below are calibrated against that exact layout.
        country: two-letter country code; unknown codes yield the empty
            template (SEX keeps its '0TH' default, matching the original).
    """
    def pick(indices):
        # Join the lines at the given positions with single spaces.
        return ' '.join(kvs[i] for i in indices)

    fields = {key: '' for key in (
        'LASTNAME', 'FIRSTNAME', 'MIDDLENAME', 'DATE_OF_BIRTH', 'ADDRESS',
        'LICENSE_NUMBER', 'EXPIRATION_DATE', 'AGENCY_CODE', 'RESTRICTIONS',
        'CONDITIONS', 'BLOOD_TYPE', 'EYES_COLOR', 'SEX', 'HEIGHT_METER',
        'WEIGHT_KG')}
    fields['SEX'] = '0TH'  # placeholder default carried over unchanged
    if country == 'ID':
        fields.update({
            'LASTNAME': pick([5]),
            'FIRSTNAME': pick([5]),
            'DATE_OF_BIRTH': pick([8]),
            'LICENSE_NUMBER': pick([11, 12]),
            'EXPIRATION_DATE': pick([13]),
            'SEX': pick([6]),
            'HEIGHT_METER': pick([9]),
        })
    elif country == 'MY':
        fields.update({
            'LASTNAME': pick([3]),
            'FIRSTNAME': pick([3]),
            'ADDRESS': pick([12, 13, 14, 15]),
            'LICENSE_NUMBER': pick([5, 6]),
            'EXPIRATION_DATE': pick([9, 10]),
            'SEX': '',
        })
    elif country == 'PH':
        fields.update({
            'LASTNAME': pick([7, 8]),
            'FIRSTNAME': pick([7, 8]),
            'MIDDLENAME': pick([7, 8]),
            'DATE_OF_BIRTH': pick([11]),
            'LICENSE_NUMBER': pick([16]),
            'EXPIRATION_DATE': pick([16]),
            'AGENCY_CODE': pick([16]),
            'RESTRICTIONS': pick([18]),
            'CONDITIONS': pick([18]),
            'BLOOD_TYPE': pick([16]),
            'SEX': pick([10]),
            'HEIGHT_METER': pick([10]),
            'WEIGHT_KG': pick([10]),
        })
    elif country == 'TH':
        fields.update({
            'LASTNAME': pick([8]),
            'FIRSTNAME': pick([8]),
            'DATE_OF_BIRTH': pick([11]),
            'LICENSE_NUMBER': pick([12]),
            'EXPIRATION_DATE': pick([5]),
            'SEX': '',
        })
    elif country == 'VN':
        fields.update({
            'FIRSTNAME': pick([6, 7]),
            'DATE_OF_BIRTH': pick([8]),
            'ADDRESS': pick([11, 12]),
            'LICENSE_NUMBER': pick([18]),
            'EXPIRATION_DATE': pick([20]),
            'AGENCY_CODE': pick([5]),
            'SEX': '',
        })
    return fields
def passport_parser(kvs, country):
    """Map positional text lines to passport fields for one country.

    Args:
        kvs: the padded line list produced by build_per_lines_text(); field
            positions below are calibrated against that exact layout.
        country: two-letter country code; unknown codes yield an all-empty
            template.
    """
    def pick(indices):
        # Join the lines at the given positions with single spaces.
        return ' '.join(kvs[i] for i in indices)

    fields = {key: '' for key in (
        'FULLNAME', 'NATIONALITY', 'DATE_OF_BIRTH', 'PLACE_OF_BIRTH', 'SEX',
        'HEIGHT', 'DATE_OF_ISSUE', 'DATE_OF_EXPIRY', 'PASSPORT_NUMBER')}
    if country == 'ID':
        fields.update({
            'FULLNAME': pick([9]),
            'NATIONALITY': pick([10]),
            'DATE_OF_BIRTH': pick([12]),
            'PLACE_OF_BIRTH': pick([13]),
            'SEX': pick([9]),
            'DATE_OF_ISSUE': pick([16]),
            'DATE_OF_EXPIRY': pick([16]),
            'PASSPORT_NUMBER': pick([6]),
        })
    elif country == 'MY':
        fields.update({
            'FULLNAME': pick([6]),
            'NATIONALITY': pick([7, 9]),
            'DATE_OF_BIRTH': pick([12]),
            'PLACE_OF_BIRTH': pick([13]),
            'SEX': pick([15]),
            'HEIGHT': pick([15]),
            'DATE_OF_ISSUE': pick([17]),
            'DATE_OF_EXPIRY': pick([19]),
            'PASSPORT_NUMBER': pick([4]),
        })
    elif country == 'PH':
        fields.update({
            'FULLNAME': pick([7, 9, 5]),
            'NATIONALITY': pick([11]),
            'DATE_OF_BIRTH': pick([11]),
            'PLACE_OF_BIRTH': pick([13]),
            'DATE_OF_ISSUE': pick([15]),
            'DATE_OF_EXPIRY': pick([17]),
            'PASSPORT_NUMBER': pick([3]),
        })
    elif country == 'TH':
        fields.update({
            'FULLNAME': pick([7]),
            'NATIONALITY': pick([11]),
            'DATE_OF_BIRTH': pick([12]),
            'PLACE_OF_BIRTH': pick([13]),
            'SEX': pick([15]),
            'HEIGHT': pick([15]),
            'DATE_OF_ISSUE': pick([17]),
            'DATE_OF_EXPIRY': pick([20]),
            'PASSPORT_NUMBER': pick([2]),
        })
    elif country == 'VN':
        fields.update({
            'FULLNAME': pick([6]),
            'NATIONALITY': pick([7]),
            'DATE_OF_BIRTH': pick([10]),
            'PLACE_OF_BIRTH': pick([10]),
            'SEX': pick([12]),
            'DATE_OF_ISSUE': pick([14]),
            'DATE_OF_EXPIRY': pick([14]),
            'PASSPORT_NUMBER': pick([4]),
        })
    return fields
def check_document_type(resp):
    """Return the empty field template matching resp['document_type'].

    Unknown document types yield {}.
    """
    templates = {
        'PASSPORT': PASSPORT,
        'DRIVER_LICENSE': DRIVER_LICENSE,
        'IDENTITY_CARD': IDENTITY_CARD,
    }
    return templates.get(resp['document_type'], {})
def parse_document(textract_resp):
    """Classify a Textract response into (document type, country) and parse it.

    Detection is keyword-based: passports are recognized from a multilingual
    phrase list over the extracted words, while identity cards and driving
    licenses are recognized from country-specific phrases over whole lines.

    Fixes over the previous revision:
      * the Malaysian ID check compared 'MyKad' against a lowercased list and
        could therefore never match — now compares 'mykad';
      * the Malaysian passport branch mistakenly parsed with the Thai ('TH')
        index layout — now uses 'MY';
      * removed the unused `item_list` local.
    """
    resp = {
        'document_type': 'unidentified',  # one of DOCUMENT_TYPE_ENUM values
        'country': 'unidentified',        # one of COUNTRY_ENUM values
        'textract_response': {},
        'key_values': {}
    }
    if textract_resp == {}:
        return resp
    kv = parse_key_value(textract_resp)
    print('key values', kv)
    resp['key_values'] = kv
    standardized_lines = parse_lines(textract_resp)
    linelist = get_lines_from_textract(textract_resp)
    linelist_standardized = [w.lower() for w in linelist]
    print('linelist:', linelist_standardized)
    wordlist = get_words_from_textract(textract_resp)
    wordlist_standardized = [w.lower() for w in wordlist]
    resp['context'] = linelist
    print(wordlist_standardized)
    # Passport detection: the word "passport" in any supported language.
    PASSPORT_PHRASE = ['passport', 'paspor', 'pasaporte', 'pasport', 'hô chiéu']
    if any(x in wordlist_standardized for x in PASSPORT_PHRASE):
        if 'indonesia' in wordlist_standardized:
            resp['document_type'] = DOCUMENT_TYPE_ENUM['passport']
            resp['country'] = COUNTRY_ENUM['ID']
            resp['textract_response'] = passport_parser(standardized_lines, 'ID')
            print(resp)
            return resp
        if 'malaysia' in wordlist_standardized:
            resp['document_type'] = DOCUMENT_TYPE_ENUM['passport']
            resp['country'] = COUNTRY_ENUM['MY']
            # BUG FIX: previously parsed with the 'TH' index layout.
            resp['textract_response'] = passport_parser(standardized_lines, 'MY')
            return resp
        if 'philippines' in wordlist_standardized:
            resp['document_type'] = DOCUMENT_TYPE_ENUM['passport']
            resp['country'] = COUNTRY_ENUM['PH']
            resp['textract_response'] = passport_parser(standardized_lines, 'PH')
            return resp
        if 'thailand' in wordlist_standardized or 'thai' in wordlist_standardized:
            resp['document_type'] = DOCUMENT_TYPE_ENUM['passport']
            resp['country'] = COUNTRY_ENUM['TH']
            resp['textract_response'] = passport_parser(standardized_lines, 'TH')
            return resp
        if 'vietnam' in wordlist_standardized:
            resp['document_type'] = DOCUMENT_TYPE_ENUM['passport']
            resp['country'] = COUNTRY_ENUM['VN']
            resp['textract_response'] = passport_parser(standardized_lines, 'VN')
            return resp
    # Indonesia
    if 'nik' in linelist_standardized or 'provinsi' in linelist_standardized:
        resp['document_type'] = DOCUMENT_TYPE_ENUM['identity_card']
        resp['country'] = COUNTRY_ENUM['ID']
        resp['textract_response'] = identity_parser(standardized_lines, 'ID')
        return resp
    if 'surat izin mengemudi' in linelist_standardized or 'kepolisian negara' in linelist_standardized:
        print(DOCUMENT_TYPE_ENUM)
        resp['document_type'] = DOCUMENT_TYPE_ENUM['driving_license']
        resp['country'] = COUNTRY_ENUM['ID']
        resp['textract_response'] = driver_parser(standardized_lines, 'ID')
        return resp
    # Philippines
    if 'philippine identification card' in linelist_standardized:
        resp['document_type'] = DOCUMENT_TYPE_ENUM['identity_card']
        resp['country'] = COUNTRY_ENUM['PH']
        resp['textract_response'] = identity_parser(standardized_lines, 'PH')
        return resp
    if all(x in linelist_standardized for x in ['land transportation office', 'republic of the philippines']):
        resp['document_type'] = DOCUMENT_TYPE_ENUM['driving_license']
        resp['country'] = COUNTRY_ENUM['PH']
        resp['textract_response'] = driver_parser(standardized_lines, 'PH')
        return resp
    # Malaysia
    # BUG FIX: 'MyKad' could never match the lowercased line list.
    if 'mykad' in linelist_standardized or 'kad pengenalan malaysia' in linelist_standardized or 'malaysia' in linelist_standardized:
        resp['document_type'] = DOCUMENT_TYPE_ENUM['identity_card']
        resp['country'] = COUNTRY_ENUM['MY']
        resp['textract_response'] = identity_parser(standardized_lines, 'MY')
        return resp
    if 'lesen memandu' in linelist_standardized or all(x in linelist_standardized for x in ['driving license', 'malaysia']):
        resp['document_type'] = DOCUMENT_TYPE_ENUM['driving_license']
        resp['country'] = COUNTRY_ENUM['MY']
        resp['textract_response'] = driver_parser(standardized_lines, 'MY')
        return resp
    # Thailand
    if 'thai' in linelist_standardized or 'thai national id card' in linelist_standardized:
        resp['document_type'] = DOCUMENT_TYPE_ENUM['identity_card']
        resp['country'] = COUNTRY_ENUM['TH']
        resp['textract_response'] = identity_parser(standardized_lines, 'TH')
        return resp
    if all(x in linelist_standardized for x in ['driving license', 'kingdom of thailand']):
        resp['document_type'] = DOCUMENT_TYPE_ENUM['driving_license']
        resp['country'] = COUNTRY_ENUM['TH']
        resp['textract_response'] = driver_parser(standardized_lines, 'TH')
        return resp
    # Vietnam
    if 'socialist republic of vietnam' in linelist_standardized or 'socialist republic of viet nam' in linelist_standardized or 'citizen identity card' in linelist_standardized:
        resp['document_type'] = DOCUMENT_TYPE_ENUM['identity_card']
        resp['country'] = COUNTRY_ENUM['VN']
        resp['textract_response'] = identity_parser(standardized_lines, 'VN')
        return resp
    if all(x in wordlist_standardized for x in ['license', 'viêt', 'nam']):
        resp['document_type'] = DOCUMENT_TYPE_ENUM['driving_license']
        resp['country'] = COUNTRY_ENUM['VN']
        resp['textract_response'] = driver_parser(standardized_lines, 'VN')
        return resp
    return resp
def build_response(resp):
    """Shape the parsed-document dict into the Lambda response payload.

    Earlier revisions also built two alternative payloads (a status-code
    wrapper and a template-based body) from check_document_type(), but they
    were never returned; that dead code and the unused template lookup have
    been removed.
    """
    return {
        'country': resp['country'],
        'document_type': resp['document_type'],
        'textract_response': resp['textract_response'],
        'key_values': resp['key_values']
    }
def lambda_handler(event, context):
    """AWS Lambda entry point: OCR a base64 image and return the parsed doc.

    Expects event['base64Image'] to hold the base64-encoded document image;
    returns the dict produced by build_response().
    """
    #print("Received event: " + json.dumps(event, indent=2))
    print(event)
    imageBase64 = event['base64Image']
    # print(type(imageBase64))
    # Run Textract FORMS analysis on the raw image bytes.
    resp = textract_process_docs(textractClient, imageBase64)
    # print('textract response:', resp)
    # Classify the document and extract country-specific fields.
    parsed_resp = parse_document(resp)
    print(parsed_resp)
    final_response = build_response(parsed_resp)
    return final_response
| 2.328125 | 2 |
optimizedGPS/results_provider/pre_solver_stats.py | mickael-grima/optimizedGPS | 0 | 12771726 | <reponame>mickael-grima/optimizedGPS<gh_stars>0
from collections import defaultdict
from optimizedGPS.data.data_generator import generate_grid_data, generate_random_drivers
from optimizedGPS.problems.PreSolver import GlobalPreSolver
from optimizedGPS.problems.Solver import Solver
def get_percentage(part, total, trunc=2):
    """Return part/total expressed as a percentage, truncated (not rounded)
    to `trunc` decimal places."""
    scaled = int(float(part) / total * (10 ** (trunc + 2)))
    return scaled / float(10 ** trunc)
def get_simple_stats():
    """Build per-driver / per-edge reachability statistics on a 7x7 grid.

    NOTE: this module targets Python 2 -- it relies on ``len(filter(...))``
    (filter returning a list) and dict ``iteritems``/``iterkeys``.
    """
    graph = generate_grid_data(7, 7)
    drivers_graph = generate_random_drivers(graph, 15)
    p = GlobalPreSolver(graph, drivers_graph)
    # m maps each driver to the set/list of edges it can reach.
    m = p.map_reachable_edges_for_drivers()
    stats = {
        "drivers": {
            id(driver): {
                "info": (driver.start, driver.end, driver.time),
                "stats": {
                    # share of the graph's edges this driver can reach
                    "edge_ratio": "%s%%" % get_percentage(len(m[driver]), graph.number_of_edges())
                }
            } for driver in drivers_graph.get_all_drivers()
        },
        "edges": {
            edge: {
                "stats": {
                    # share of drivers that can reach this edge
                    "drivers_ratio": "%s%%" % get_percentage(len(filter(lambda d: edge in m[d], m.keys())),
                                                             drivers_graph.number_of_drivers())
                }
            } for edge in graph.edges()
        },
        "general": {
            # edges no driver can reach
            "unused_edges": len(filter(
                lambda edge: len(filter(lambda d: edge in m[d], m.keys())) == 0,
                graph.edges()
            )),
            # edges every driver can reach
            "full_used_edges": len(filter(
                lambda edge: len(filter(lambda d: edge in m[d], m.keys())) == drivers_graph.number_of_drivers(),
                graph.edges()
            )),
            "edges_uses_repartition": defaultdict(lambda: 0)
        }
    }
    # Histogram of drivers_ratio values over edges, then normalise each
    # bucket count to a percentage of all edges.
    for edge, data in stats["edges"].iteritems():
        stats["general"]["edges_uses_repartition"][data["stats"]["drivers_ratio"]] += 1
    for ratio in stats["general"]["edges_uses_repartition"].iterkeys():
        stats["general"]["edges_uses_repartition"][ratio] = "%s%%" % get_percentage(
            stats["general"]["edges_uses_repartition"][ratio],
            graph.number_of_edges()
        )
    return stats
def get_average_unused_edges(length=5, width=5, number_of_drivers=10, niter=10):
    """
    Run `niter` random grid instances with the given parameters and return the
    average percentage of edges the pre-solver marks as unused.

    :param length: grid length
    :param width: grid width
    :param number_of_drivers: drivers per instance
    :param niter: number of random instances
    :return: average unused-edge percentage (float)
    """
    percentages = []
    for _ in range(niter):
        grid = generate_grid_data(length=length, width=width)
        drivers = generate_random_drivers(grid, total_drivers=number_of_drivers)
        pre_solver = GlobalPreSolver(grid, drivers)
        unused = len(list(pre_solver.iter_unused_edges()))
        percentages.append(get_percentage(unused, grid.number_of_edges()))
    return sum(percentages) / float(len(percentages))
def test_running_time():
    # TODO: placeholder -- running-time benchmark not implemented yet.
    pass
def compute_driver_influence_on_prersolving(length=3, width=3, niter=10):
    """Measure how the number of drivers influences pre-solving.

    For each driver count ``n`` (1, 1+length, 1+2*length, ... up to
    length*width), run ``niter`` random grid instances with ``n`` drivers and
    record the average percentage of unused edges.

    Fixes over the previous version: ``n`` was never passed to
    generate_random_drivers (so every sample used the generator's default
    driver count, defeating the purpose of the sweep), and each of the
    ``niter`` inner iterations overwrote ``unused_edges[n]`` so only the last
    sample was kept instead of an average.

    :return: dict mapping driver count -> average unused-edge percentage
    """
    unused_edges = {}
    for n in range(1, length * width + 1, length):
        ratios = []
        for _ in range(niter):
            graph = generate_grid_data(length=length, width=width)
            drivers_graph = generate_random_drivers(graph, total_drivers=n)
            presolver = GlobalPreSolver(graph, drivers_graph)
            ratios.append(get_percentage(len(list(presolver.iter_unused_edges())),
                                         graph.number_of_edges()))
        unused_edges[n] = sum(ratios) / float(len(ratios))
    return unused_edges
def compute_variables_reduction(horizon=1000):
    """
    Check the reduction for a variable like x_{e,d,t}

    Builds a 10x10 grid with 15 random drivers, pre-solves, and compares the
    number of (driver, edge, time) variables that survive pre-solving against
    the naive count drivers * edges * horizon.

    :param horizon: time horizon (upper bound on t)
    :return: percentage of variables eliminated by pre-solving
    """
    graph = generate_grid_data(10, 10)
    drivers_graph = generate_random_drivers(graph, 15)
    # Naive variable count without any pre-solving.
    max_nb_var = drivers_graph.number_of_drivers() * graph.number_of_edges() * horizon
    solver = Solver(graph, drivers_graph, None, None)
    solver.presolve()
    value = 0
    for driver in drivers_graph.get_all_drivers():
        for edge in solver.drivers_structure.get_possible_edges_for_driver(driver):
            # Safety interval = time window during which the driver may be on
            # this edge; clip it to the horizon.
            start, end = solver.drivers_structure.get_safety_interval(driver, edge)
            if start >= horizon:
                continue
            elif end >= horizon:
                end = horizon
            value += end - start + 1
    # trunc=10 keeps essentially full precision in the percentage.
    return 100 - get_percentage(value, max_nb_var, trunc=10)
if __name__ == "__main__":
    # stats = get_simple_stats()
    # average_unused_edges = get_average_unused_edges(number_of_drivers=50, niter=1)
    # unused_edges = compute_driver_influence_on_prersolving(10, 10, 100)
    ratio = compute_variables_reduction()
    # Python 2 print statement -- this module targets Python 2.
    print ratio
| 2.4375 | 2 |
hover/hover/core.py | DouglasOrr/Snippets | 0 | 12771727 | <reponame>DouglasOrr/Snippets
from __future__ import annotations
import collections
from contextlib import contextmanager
from dataclasses import dataclass
import functools as ft
import glob
import json
import itertools as it
import os
import time
import Box2D as B
import IPython.display
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch as T
from . import render
# Rocket observation: position (x, y), angle a, and their time derivatives
# (dx, dy, da) -- this is what agents receive every control step.
State = collections.namedtuple('State', ('x', 'y', 'a', 'dx', 'dy', 'da'))
@dataclass(frozen=True)
class Outcome:
    """Immutable record of how a game episode ended."""
    kind: str       # e.g. 'timeout' or 'outofbounds'
    success: bool   # whether the episode counts as a success
    duration: float # elapsed game time in seconds

    def to_json(self):
        """Return a JSON-serializable dict view of this outcome."""
        return {"kind": self.kind, "success": self.success, "duration": self.duration}
class Game:
    """Box2D simulation of a two-thruster rocket hovering above the ground.

    The world has a static ground body and a dynamic rocket body; agents are
    callables State -> (fire_left, fire_right). Rendering goes through
    hover.render to SVG.
    """
    def __init__(self, seed=None):
        # Settings
        self.timestep = 0.01   # physics step, seconds
        self.thrust = 15       # thruster force per unit of rocket mass
        self.hwidth = 0.4      # rocket half-width (world units)
        self.hheight = 2       # rocket half-height (world units)
        self.max_time = 20     # episode time limit, seconds
        # Transient
        self.elapsed_steps = 0
        self.elapsed_time = 0.0
        self.control = (False, False)
        random = np.random.RandomState(seed)  # pylint: disable=no-member
        # Box2D/hover.render
        self.world = B.b2World(gravity=(0, -10))
        self.ground = self.world.CreateStaticBody(
            position=[0, -10],
            shapes=B.b2PolygonShape(box=(50, 10)),
        )
        # Rocket starts at (0, 15) with a small random initial tilt.
        self.rocket = self.world.CreateDynamicBody(
            position=[0, 15],
            angle=1.0 * (random.rand()-0.5)
        )
        w = self.hwidth
        h = self.hheight
        t = 2 * self.hwidth
        # Lower fixture: the rocket's flared base.
        self.rocket.CreatePolygonFixture(
            vertices=[
                (-2*w, -h),
                (2*w, -h),
                (w, t-h),
                (-w, t-h),
            ],
            density=1,
            friction=1,
        )
        # Upper fixture: the rocket's body and nose cone.
        self.rocket.CreatePolygonFixture(
            vertices=[
                (-w, t-h),
                (w, t-h),
                (w, h-w),
                (0, h),
                (-w, h-w),
            ],
            density=1,
            friction=1,
        )
        # Purely visual exhaust-flame triangles, drawn when a thruster fires.
        d = 2 * self.hwidth
        self.left_thruster_shape = render.PolygonShape(
            color='orange',
            vertices=(
                (-2*w, -h),
                (-w, -h-d),
                (0, -h),
            ))
        self.right_thruster_shape = render.PolygonShape(
            color='orange',
            vertices=(
                (0, -h),
                (w, -h-d),
                (2*w, -h),
            ))
    @staticmethod
    def _convert_body(body, color, extra_shapes=()):
        # Convert a Box2D body into a render.Body for drawing; extra_shapes
        # lets callers append decorations (e.g. thruster flames).
        return render.Body(
            x=body.position.x,
            y=body.position.y,
            angle=body.angle,
            shapes=tuple(
                render.PolygonShape(vertices=tuple(fixture.shape.vertices), color=color)
                for fixture in body.fixtures
            ) + tuple(extra_shapes)
        )
    def draw(self):
        """Render the game to svg.

        returns -- string -- SVG
        """
        ground = self._convert_body(self.ground, 'black')
        rocket = self._convert_body(
            self.rocket, 'blue',
            ((self.left_thruster_shape,) if self.control[0] else ()) +
            ((self.right_thruster_shape,) if self.control[1] else ()))
        return render.draw(
            render.Scene(bounds=(-30, 30, -1, 29),
                         width=800,
                         bodies=(ground, rocket)))
    def _repr_html_(self):
        # IPython rich display hook -- shows the SVG inline in notebooks.
        return self.draw()
    @property
    def state(self):
        """Returns the State tuple, that describes the rocket."""
        position = self.rocket.position
        angle = self.rocket.angle
        dposition = self.rocket.linearVelocity
        dangle = self.rocket.angularVelocity
        return State(position.x, position.y, angle, dposition.x, dposition.y, dangle)
    def _in_bounds(self):
        # The episode fails if the rocket drifts too far sideways, too
        # high/low, or tips past ~86 degrees.
        position = self.rocket.position
        angle = self.rocket.angle
        return abs(position.x) < 20 and 4 <= position.y < 25 and abs(angle) < 1.5
    def step(self, control):
        """Take a single step in the game.

        control -- (float, float) -- (fire_left, fire_right) -- thrusters

        returns -- Outcome|None -- outcome of the game, if finished
        """
        self.control = control
        # Thrust is applied along the rocket's local "up" axis, scaled by
        # mass so acceleration is independent of the rocket's density.
        thrust_v = self.rocket.GetWorldVector([0, self.rocket.mass * self.thrust])
        if control[0]:
            self.rocket.ApplyForce(thrust_v, self.rocket.GetWorldPoint([-self.hwidth, -self.hheight]), True)
        if control[1]:
            self.rocket.ApplyForce(thrust_v, self.rocket.GetWorldPoint([self.hwidth, -self.hheight]), True)
        self.world.Step(self.timestep, 5, 5)
        self.elapsed_steps += 1
        self.elapsed_time += self.timestep
        # Surviving until the time limit counts as a success.
        if self.max_time <= self.elapsed_time:
            return Outcome('timeout', True, self.elapsed_time)
        if not self._in_bounds():
            return Outcome('outofbounds', False, self.elapsed_time)
    def step_multi(self, control, ticks):
        """Take multiple steps with the same control input.

        returns -- Outcome|None
        """
        for _ in range(ticks):
            outcome = self.step(control)
            if outcome:
                return outcome
    @classmethod
    def play(cls, agent, steps_per_control=1):
        """Play a complete game and return the outcome."""
        game = cls()
        control = agent(game.state)
        while True:
            outcome = game.step(control)
            if outcome:
                return outcome
            # Re-query the agent only every steps_per_control physics steps.
            if game.elapsed_steps % steps_per_control == 0:
                control = agent(game.state)
    @classmethod
    def play_and_display(cls, agent, steps_per_render=10, steps_per_control=1):
        """Render a game in IPython, as updating HTML."""
        game = cls()
        display = IPython.display.display(game, display_id=True)
        control = agent(game.state)
        while True:
            for _ in range(steps_per_render):
                outcome = game.step(control)
                if outcome:
                    return outcome
                if game.elapsed_steps % steps_per_control == 0:
                    control = agent(game.state)
            display.update(game)
            # Sleep so wall-clock playback roughly matches simulated time.
            time.sleep(game.timestep * steps_per_render)
class Report:
    """Save evaluation runs to disk and load/aggregate them for analysis.

    The "Saving" half writes about.json / test.jsonl / agent.pkl under a run
    directory; the "Loading" half collects all runs under a root into a
    pandas DataFrame.
    """
    ################################################################################
    # Saving
    @staticmethod
    def _evaluate_one(agent):
        # Play one full game and return its Outcome as a plain dict.
        return Game.play(agent).to_json()
    @staticmethod
    @contextmanager
    def _mapper(nproc):
        # Yield a map-like callable: builtin map for nproc == 1, otherwise a
        # torch.multiprocessing pool's map.
        if nproc == 1:
            yield map
        else:
            with T.multiprocessing.Pool(nproc) as pool:
                yield pool.map
    @staticmethod
    def _open_write(path, mode='w'):
        # Open `path` for writing, creating parent directories as needed.
        parent = os.path.dirname(path)
        if not os.path.isdir(parent):
            os.makedirs(parent)
        return open(path, mode)
    @classmethod
    def about(cls, path, name, kind, **args):
        # Record run metadata as <path>/about.json.
        with cls._open_write(os.path.join(path, 'about.json')) as file:
            json.dump(dict(name=name, kind=kind, **args), file)
    @classmethod
    def test(cls, path, agent, ngames, nproc=T.multiprocessing.cpu_count()):
        # Play `ngames` games (optionally in parallel) and stream the outcomes
        # to <path>/test.jsonl, one JSON object per line.
        # NOTE(review): the nproc default is evaluated once at import time.
        with cls._mapper(nproc) as mapper, \
                cls._open_write(os.path.join(path, 'test.jsonl')) as file:
            for result in mapper(cls._evaluate_one, it.repeat(agent, ngames)):
                json.dump(result, file)
                file.write('\n')
    @classmethod
    def agent(cls, path, agent):
        # Serialize the agent with torch.save to <path>/agent.pkl.
        with cls._open_write(os.path.join(path, 'agent.pkl'), 'wb') as file:
            T.save(agent, file)
    class Training:
        """Context manager that appends rows to named .jsonl logs in a dir."""
        def __init__(self, root):
            self._root = root
            self._logs = {}          # log name -> open file handle
            self._t0 = time.time()   # logs record time relative to creation
            if not os.path.isdir(root):
                os.makedirs(root)
        def __enter__(self):
            return self
        def __exit__(self, type, value, traceback):
            self.close()
        def close(self):
            # Close all lazily-opened log files.
            for file in self._logs.values():
                file.close()
        def append(self, name, **row):
            """Append one JSON row (with an elapsed-time field) to log `name`."""
            if name not in self._logs:
                self._logs[name] = open(os.path.join(self._root, name + '.jsonl'), 'w')
            log = self._logs[name]
            json.dump(dict(t=time.time()-self._t0, **row), log)
            log.write('\n')
    @classmethod
    def training(cls, path):
        # Convenience: a Training logger rooted at <path>/training.
        return cls.Training(os.path.join(path, 'training'))
    ################################################################################
    # Loading
    @classmethod
    def load(cls, root):
        """Load every run under `root` (any dir with about.json) into one Report."""
        parts = []
        keys = set([])
        for about_path in glob.glob(os.path.join(root, '**/about.json')):
            df = pd.read_json(os.path.join(os.path.dirname(about_path), 'test.jsonl'),
                              lines=True)
            with open(about_path) as f:
                about = json.load(f)
            keys |= about.keys()
            # Broadcast each metadata field onto every row of this run.
            for key, value in about.items():
                df[key] = value
            parts.append(df)
        # Stable column order: kind, name first, then the rest sorted.
        keys = ['kind', 'name'] + list(sorted(keys - {'kind', 'name'}))
        return cls(pd.concat(parts), keys)
    def __init__(self, data, keys):
        self.data = data   # combined per-game DataFrame
        self.keys = keys   # metadata column names to group by
    def _repr_html_(self):
        # *1 is a trick to convert booleans to numeric
        return (self.data * 1).groupby(list(self.keys)).mean()._repr_html_()
    def plot_duration(self):
        """Plot log-binned histograms of game duration, one per agent name."""
        plt.figure(figsize=(10, 6))
        bins = np.logspace(np.floor(np.log10(self.data.duration.min())),
                           np.ceil(np.log10(self.data.duration.max())),
                           num=40)
        names = sorted(set(self.data.name))
        for name in names:
            sns.distplot(self.data.duration[self.data.name == name], kde=False, bins=bins)
        plt.gca().set_xscale('log')
        plt.legend(names)
        plt.title('Game duration')
class IntegratorAgent:
    """Turn a continuous agent into a PWM discrete agent (suitable for the game)."""

    def __init__(self, agent):
        self.agent = agent
        self._left = 0   # accumulated left-thruster demand
        self._right = 0  # accumulated right-thruster demand

    def __call__(self, state):
        # Pulse-width modulation: accumulate the continuous commands and fire
        # a thruster each time its accumulator reaches 1.
        target_left, target_right = self.agent(state)
        self._left += target_left
        self._right += target_right
        fire_left = 1 <= self._left
        fire_right = 1 <= self._right
        if fire_left:
            self._left -= 1
        if fire_right:
            self._right -= 1
        return fire_left, fire_right
def _constant_agent(left, right, state):
    """Agent body for constant_agent; ignores `state`.

    Kept as a module-level function (rather than a closure/lambda) so the
    functools.partial built on it stays picklable for multiprocessing.
    """
    return (left, right)
def constant_agent(left, right):
    """An agent that always returns the same action."""
    # ft.partial over a module-level function keeps the agent picklable,
    # which Report.test's multiprocessing pool requires.
    return ft.partial(_constant_agent, left, right)
| 2.4375 | 2 |
wdwarfdate/Models/MIST/make_one_file.py | rkiman/wdwarfdate | 3 | 12771728 | import numpy as np
from astropy.table import Table
import glob
# MIST EEP track directories to summarise (one "<model>_sum.csv" per directory).
models = ['MIST_v1.2_feh_m4.00_afe_p0.0_vvcrit0.0_EEPS',
          'MIST_v1.2_feh_m4.00_afe_p0.0_vvcrit0.4_EEPS',
          'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.0_EEPS',
          'MIST_v1.2_feh_p0.00_afe_p0.0_vvcrit0.4_EEPS',
          'MIST_v1.2_feh_p0.50_afe_p0.0_vvcrit0.0_EEPS',
          'MIST_v1.2_feh_p0.50_afe_p0.0_vvcrit0.4_EEPS']

for model in models:
    print(model)
    masses = []
    ages = []
    # Assumes column 0 holds the age and column 1 the initial mass in each
    # track file -- TODO confirm against the MIST EEP file format.
    for track_path in glob.glob(model + '/*.txt'):
        track = np.loadtxt(track_path)
        masses.append(track[0, 1])    # initial mass from the first row
        ages.append(track[-1, 0])     # main-sequence age = age of the last row
    summary = Table()
    summary['initial_mass'] = masses
    summary['ms_age'] = ages
    summary.write(model + '_sum.csv')
| 2.078125 | 2 |
main/migrations/0010_auto_20180317_1440.py | hectdev/python_sh | 4 | 12771729 | <reponame>hectdev/python_sh
# Generated by Django 2.0.1 on 2018-03-17 14:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2.0.1, 2018-03-17).

    Relaxes four Log fields (browser, country, date, ip_address) to optional
    CharFields: blank=True with an empty-string default, max_length 255.
    Do not hand-edit the operations -- Django introspects them.
    """

    dependencies = [
        ('main', '0009_auto_20180317_1434'),
    ]

    operations = [
        migrations.AlterField(
            model_name='log',
            name='browser',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='log',
            name='country',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='log',
            name='date',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
        migrations.AlterField(
            model_name='log',
            name='ip_address',
            field=models.CharField(blank=True, default='', max_length=255),
        ),
    ]
| 1.828125 | 2 |
Misc_plots.py | lens-corp/auto-plot | 0 | 12771730 |
import json
import csv
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
import os
import shutil
def get_dtype_groups(data_types):
    """Partition the values of `data_types` by their dtype key.

    `data_types` maps numpy dtypes to column-name entries; returns three
    lists: (float64/float32 entries, object entries, int64/int32/int16
    entries). Entries with other dtypes are dropped.
    """
    float_dtypes = (np.dtype('float64'), np.dtype('float32'))
    int_dtypes = (np.dtype('int64'), np.dtype('int32'), np.dtype('int16'))
    float_unis, object_unis, int_unis = [], [], []
    for dtype, entry in data_types.items():
        if dtype in float_dtypes:
            float_unis.append(entry)
        if dtype == np.dtype('O'):
            object_unis.append(entry)
        if dtype in int_dtypes:
            int_unis.append(entry)
    return float_unis, object_unis, int_unis
def findsubsets(s, n):
    # NOTE(review): despite the name, this returns ordered *permutations* of
    # length n -- (a, b, c) and (b, a, c) both appear -- which plot_3d uses to
    # plot every axis ordering. Use itertools.combinations if unordered
    # subsets are ever wanted.
    return list(itertools.permutations(s, n))
def plot_3d(data, headers, data_types, filename):
    """Save 3D scatter plots for triples of numeric columns.

    Recreates saved_plots/<filename>_Misc_Plots/scatter_3dPlots/ and writes
    one PNG per ordered triple of (up to 4) float/int columns.

    NOTE(review): if there are no float columns, `pairs_3d` is never bound and
    the try block raises NameError, after which the except handler itself
    raises NameError on `j`. The sub_folders cleanup also checks the bare
    relative path `i` instead of dirpath+'/'+i (harmless since dirpath was
    just recreated, but the check never fires).
    """
    dirpath = 'saved_plots/{}_Misc_Plots'.format(filename)
    sub_folders = ['scatter_3dPlots']
    # Wipe and recreate the output directory tree.
    if os.path.exists(dirpath) and os.path.isdir(dirpath):
        shutil.rmtree(dirpath)
    os.makedirs(dirpath)
    for i in sub_folders:
        if os.path.exists(i) and os.path.isdir(i):
            shutil.rmtree(i)
        os.makedirs(dirpath+'/'+i)
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    float_unis, object_unis, int_unis = get_dtype_groups(data_types)
    palette = itertools.cycle(sns.color_palette())
    # Pick at most 4 numeric columns, then plot every ordered triple.
    if len(float_unis) > 0:
        if len(int_unis) > 0:
            cols = float_unis[0]+int_unis[0]
            if len(cols) > 4:
                cols = cols[:4]
            pairs_3d = findsubsets(cols, 3)
        else:
            cols = float_unis[0]
            if len(cols) > 4:
                cols = cols[:4]
            pairs_3d = findsubsets(cols, 3)
    try:
        for j in pairs_3d:
            fig = plt.figure()
            ax = plt.axes(projection='3d')
            ax.scatter(data[j[0]], data[j[1]],
                       data[j[2]], color=next(palette))
            ax.legend()
            x = j[0]
            ax.set_xlabel(x, fontsize=20)
            ax.set_ylabel(j[1], fontsize=20)
            ax.set_zlabel(j[2], fontsize=20, rotation=0)
            fig.set_size_inches(18.5, 10.5)
            plt.savefig(
                './{}/scatter_3dPlots/{}_{}_{}_set.png'.format(dirpath, j[0], j[1], j[2]))
    except Exception as e:
        print(e)
        print('error occured while plotting {} columns.'.format(j))
def plot_groupby(data, headers, data_types, filename):
    """Save a bar plot of per-group numeric means for each object column.

    For every object-dtype column with at most 30 distinct values, groups
    `data` by that column, takes the mean of the numeric columns, and saves a
    bar chart under saved_plots/<filename>_Misc_Plots/ (the directory is
    expected to exist, e.g. created by plot_3d).

    Fixes over the previous version: the distinct-value check now runs before
    any plotting work (it previously ran after the figure was built), figures
    are closed after each column (matplotlib otherwise leaks open figures),
    one failing column no longer aborts the remaining ones, and the error
    handler can no longer hit an unbound loop variable.
    """
    dirpath = 'saved_plots/{}_Misc_Plots'.format(filename)
    float_unis, object_unis, int_unis = get_dtype_groups(data_types)
    if not object_unis:
        return
    for j in object_unis[0]:
        try:
            # Skip high-cardinality columns before doing any plotting work.
            if len(pd.unique(data[j])) > 30:
                continue
            df = data.groupby(j).mean()
            fig, ax = plt.subplots()
            df.plot(kind='bar')
            fig.set_size_inches(18.5, 10.5)
            plt.savefig('./{}/groupby_{}_bar_plot.png'.format(dirpath, j))
        except Exception as e:
            print(e)
            print('error occured while plotting groupby by {} column.'.format(j))
        finally:
            # Release every figure created for this column to avoid the
            # "More than 20 figures have been opened" matplotlib leak.
            plt.close('all')
| 2.296875 | 2 |
dlpy/applications/densenet.py | arharvey918/python-dlpy | 1 | 12771731 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from dlpy.sequential import Sequential
from dlpy.layers import InputLayer, Conv2d, BN, Pooling, Concat, OutputLayer, GlobalAveragePooling2D
from dlpy.blocks import DenseNetBlock
from .application_utils import get_layer_options, input_layer_options
from dlpy.model import Model
from dlpy.utils import DLPyError
from dlpy.network import extract_input_layer, extract_output_layer, extract_conv_layer
def DenseNet(conn, model_table='DenseNet', n_classes=None, conv_channel=16, growth_rate=12, n_blocks=4,
             n_cells=4, n_channels=3, width=32, height=32, scale=1, random_flip=None, random_crop=None,
             offsets=(85, 111, 139), random_mutation=None):
    '''
    Generates a deep learning model with the DenseNet architecture.

    Parameters
    ----------
    conn : CAS
        Specifies the connection of the CAS connection.
    model_table : string
        Specifies the name of CAS table to store the model.
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: None
    conv_channel : int, optional
        Specifies the number of filters of the first convolution layer.
        Default: 16
    growth_rate : int, optional
        Specifies the growth rate of convolution layers.
        Default: 12
    n_blocks : int, optional
        Specifies the number of DenseNet blocks.
        Default: 4
    n_cells : int, optional
        Specifies the number of dense connection for each DenseNet block.
        Default: 4
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3
    width : int, optional
        Specifies the width of the input layer.
        Default: 32
    height : int, optional
        Specifies the height of the input layer.
        Default: 32
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        Default: (85, 111, 139)
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'

    Returns
    -------
    :class:`Sequential`

    References
    ----------
    https://arxiv.org/pdf/1608.06993.pdf

    '''
    conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
    # get all the parms passed in (locals() must be read before any new
    # local variables are created)
    parameters = locals()
    channel_in = conv_channel  # number of channel of transition conv layer
    model = Sequential(conn=conn, model_table=model_table)
    # pick out the input-layer options from the full parameter set
    input_parameters = get_layer_options(input_layer_options, parameters)
    model.add(InputLayer(**input_parameters))
    # Top layers
    model.add(Conv2d(conv_channel, width=3, act='identity', include_bias=False, stride=1))
    for i in range(n_blocks):
        model.add(DenseNetBlock(n_cells=n_cells, kernel_size=3, n_filter=growth_rate, stride=1))
        # transition block: each dense block adds growth_rate*n_cells channels
        channel_in += (growth_rate * n_cells)
        model.add(BN(act='relu'))
        # no transition conv/pool after the last dense block
        if i != (n_blocks - 1):
            model.add(Conv2d(channel_in, width=3, act='identity', include_bias=False, stride=1))
            model.add(Pooling(width=2, height=2, pool='mean'))
    model.add(GlobalAveragePooling2D())
    model.add(OutputLayer(act='softmax', n=n_classes))
    return model
def DenseNet121(conn, model_table='DENSENET121', n_classes=1000, conv_channel=64, growth_rate=32,
                n_cells=[6, 12, 24, 16], n_channels=3, reduction=0.5, width=224, height=224, scale=1,
                random_flip=None, random_crop=None, offsets=(103.939, 116.779, 123.68), random_mutation=None):
    '''
    Generates a deep learning model with the DenseNet121 architecture.

    Parameters
    ----------
    conn : CAS
        Specifies the connection of the CAS connection.
    model_table : string
        Specifies the name of CAS table to store the model.
    n_classes : int, optional
        Specifies the number of classes. If None is assigned, the model will
        automatically detect the number of classes based on the training set.
        Default: 1000
    conv_channel : int, optional
        Specifies the number of filters of the first convolution layer.
        Default: 64
    growth_rate : int, optional
        Specifies the growth rate of convolution layers.
        Default: 32
    n_cells : int array length=4, optional
        Specifies the number of dense connection for each DenseNet block.
        Default: [6, 12, 24, 16]
    reduction : double, optional
        Specifies the factor of transition blocks.
        Default: 0.5
    n_channels : int, optional
        Specifies the number of the channels (i.e., depth) of the input layer.
        Default: 3.
    width : int, optional
        Specifies the width of the input layer.
        Default: 224.
    height : int, optional
        Specifies the height of the input layer.
        Default: 224.
    scale : double, optional
        Specifies a scaling factor to be applied to each pixel intensity values.
        Default: 1.
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        Default: (103.939, 116.779, 123.68)
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'

    Returns
    -------
    :class:`Sequential`

    References
    ----------
    https://arxiv.org/pdf/1608.06993.pdf

    '''
    conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
    # get all the parms passed in (read locals() before creating new locals)
    parameters = locals()
    n_blocks = len(n_cells)
    model = Sequential(conn=conn, model_table=model_table)
    # pick out the input-layer options from the full parameter set
    input_parameters = get_layer_options(input_layer_options, parameters)
    model.add(InputLayer(**input_parameters))
    # Top layers: 7x7/2 conv + 3x3/2 max pool, as in the DenseNet paper
    model.add(Conv2d(conv_channel, width=7, act='identity', include_bias=False, stride=2))
    model.add(BN(act='relu'))
    src_layer = Pooling(width=3, height=3, stride=2, padding=1, pool='max')
    model.add(src_layer)
    for i in range(n_blocks):
        for _ in range(n_cells[i]):
            # bottleneck: BN-ReLU-1x1 conv (4*growth_rate) then BN-ReLU-3x3 conv
            model.add(BN(act='relu'))
            model.add(Conv2d(n_filters=growth_rate * 4, width=1, act='identity', stride=1, include_bias=False))
            model.add(BN(act='relu'))
            src_layer2 = Conv2d(n_filters=growth_rate, width=3, act='identity', stride=1, include_bias=False)
            model.add(src_layer2)
            # dense connectivity: concatenate the new features onto the running stack
            src_layer = Concat(act='identity', src_layers=[src_layer, src_layer2])
            model.add(src_layer)
            conv_channel += growth_rate
        if i != (n_blocks - 1):
            # transition block: 1x1 conv compressing channels by `reduction`, then 2x2 mean pool
            conv_channel = int(conv_channel * reduction)
            model.add(BN(act='relu'))
            model.add(Conv2d(n_filters=conv_channel, width=1, act='identity', stride=1, include_bias=False))
            src_layer = Pooling(width=2, height=2, stride=2, pool='mean')
            model.add(src_layer)
    model.add(BN(act='identity'))
    # Bottom Layers
    model.add(GlobalAveragePooling2D())
    model.add(OutputLayer(act='softmax', n=n_classes))
    return model
def DenseNet121_ONNX(conn, model_file, n_classes=1000, width=224, height=224,
                     offsets=(255*0.406, 255*0.456, 255*0.485), norm_stds=(255*0.225, 255*0.224, 255*0.229),
                     random_flip=None, random_crop=None, random_mutation=None, include_top=False):
    """
    Generates a deep learning model with the DenseNet121_ONNX architecture.

    The model architecture and pre-trained weights is generated from DenseNet121 ONNX trained on ImageNet dataset.
    The model file and the weights file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
    To learn more information about the model and pre-processing.
    Please go to the websites: https://github.com/onnx/models/tree/master/vision/classification/densenet-121.

    Parameters
    ----------
    conn : CAS
        Specifies the CAS connection object.
    model_file : string
        Specifies the absolute server-side path of the model table file.
        The model table file can be downloaded from https://support.sas.com/documentation/prod-p/vdmml/zip/.
    n_classes : int, optional
        Specifies the number of classes.
        Default: 1000
    width : int, optional
        Specifies the width of the input layer.
        Default: 224
    height : int, optional
        Specifies the height of the input layer.
        Default: 224
    offsets : double or iter-of-doubles, optional
        Specifies an offset for each channel in the input data. The final input
        data is set after applying scaling and subtracting the specified offsets.
        The channel order is BGR.
        Default: (255*0.406, 255*0.456, 255*0.485)
    norm_stds : double or iter-of-doubles, optional
        Specifies a standard deviation for each channel in the input data.
        The final input data is normalized with specified means and standard deviations.
        The channel order is BGR.
        Default: (255*0.225, 255*0.224, 255*0.229)
    random_flip : string, optional
        Specifies how to flip the data in the input layer when image data is
        used. Approximately half of the input data is subject to flipping.
        Valid Values: 'h', 'hv', 'v', 'none'
    random_crop : string, optional
        Specifies how to crop the data in the input layer when image data is
        used. Images are cropped to the values that are specified in the width
        and height parameters. Only the images with one or both dimensions
        that are larger than those sizes are cropped.
        Valid Values: 'none', 'unique', 'randomresized', 'resizethencrop'
    random_mutation : string, optional
        Specifies how to apply data augmentations/mutations to the data in the input layer.
        Valid Values: 'none', 'random'
    include_top : bool, optional
        Specifies whether to include pre-trained weights of the top layers (i.e., the FC layers)
        Default: False

    """
    parameters = locals()
    input_parameters = get_layer_options(input_layer_options, parameters)
    # load model and model weights
    model = Model.from_sashdat(conn, path = model_file)
    # sanity-check the layer count to confirm this is the expected SASHDAT
    if model.summary.shape[0] != 307:
        raise DLPyError("The model file doesn't point to a valid DenseNet121_ONNX model. "
                        "Please check the SASHDAT file.")
    # extract the input layer config from the model table
    model_table_df = conn.CASTable(**model.model_table).to_frame()
    input_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 0]
    input_layer = extract_input_layer(input_layer_df)
    input_layer_config = input_layer.config
    # override the stored input options with the user-supplied ones
    input_layer_config.update(input_parameters)
    # replace the input layer in place, keeping its original name
    model.layers[0] = InputLayer(**input_layer_config, name=model.layers[0].name)
    # warn (but continue) if the companion weights table is not loaded
    if not conn.tableexists(model.model_weights.name).exists:
        weights_file_path = os.path.join(os.path.dirname(model_file), model.model_name + '_weights.sashdat')
        print('WARNING: Model weights is not attached '
              'since system cannot find a weights file located at {}'.format(weights_file_path))
    if include_top:
        if n_classes != 1000:
            raise DLPyError("If include_top is enabled, n_classes has to be 1000.")
    else:
        # since the output layer is non fully connected layer,
        # we need to modify the convolution right before the output. The number of filter is set to n_classes.
        conv_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 305]
        conv_layer = extract_conv_layer(conv_layer_df)
        conv_layer_config = conv_layer.config
        # set the classifier conv's filter count to the requested class count
        conv_layer_config.update({'n_filters': n_classes})
        # swap in the rebuilt conv layer, keeping its name and source layer
        model.layers[-2] = Conv2d(**conv_layer_config,
                                  name=model.layers[-2].name, src_layers=model.layers[-3])
        # overwrite n_classes in output layer
        out_layer_df = model_table_df[model_table_df['_DLLayerID_'] == 306]
        out_layer = extract_output_layer(out_layer_df)
        out_layer_config = out_layer.config
        # set the output layer's neuron count to the requested class count
        out_layer_config.update({'n': n_classes})
        # swap in the rebuilt output layer, keeping its name and source layer
        model.layers[-1] = OutputLayer(**out_layer_config,
                                       name = model.layers[-1].name, src_layers=model.layers[-2])
        # remove top weights (drop everything from the classifier conv onward)
        model.model_weights.append_where('_LayerID_<305')
        model._retrieve_('table.partition', table=model.model_weights,
                         casout=dict(replace=True, name=model.model_weights.name))
        model.set_weights(model.model_weights.name)
    # recompile the whole network according to the new layer list
    model.compile()
    return model
| 2.234375 | 2 |
pyraft/test_client.py | yashrsharma44/pyraft | 0 | 12771732 | <reponame>yashrsharma44/pyraft<filename>pyraft/test_client.py
from json import encoder
from collections import defaultdict
from pyraft.client import NodeClient
import unittest
import uuid
import asyncio
import logging
import json
import multiprocessing
# Field separator for the line-based wire protocol: "<operation>$<entry>".
DELIMITER = "$"
def runner(coro):
    """Run a zero-argument coroutine function to completion on a fresh loop."""
    asyncio.run(coro())
class DummyServer:
    """Minimal in-memory key/value TCP server used by the client tests.

    Protocol: one request per line, "<operation>$<entry>", where operation is
    'get' (entry is a key) or 'set' (entry is "key value"). Unknown keys map
    to the empty string (defaultdict(str)).
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.map = defaultdict(str)

    async def serve(self):
        """Bind to (host, port) and serve requests until cancelled."""
        server = await asyncio.start_server(self.handler, self.host, self.port)
        addr = server.sockets[0].getsockname()
        logging.info(f"serving on {addr}")
        async with server:
            await server.serve_forever()

    async def shutdown(self):
        """Cancel every task except the caller and wait for them to finish."""
        logging.info("shutting down the server")
        tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
        # Guard: asyncio.wait raises ValueError on an empty task set.
        if not tasks:
            return
        logging.info(f"cancelling {len(tasks)}")
        for task in tasks:
            task.cancel()
        logging.info(f"let the pending tasks finish")
        done, pending = await asyncio.wait(tasks)
        for task in pending:
            task.cancel()

    async def handler(self, reader, writer):
        """Handle one request line and write the reply.

        Fixes over the previous version: the leftover breakpoint() and debug
        print are removed, and all replies are encoded to bytes --
        StreamWriter.write rejects str, and json.dumps was being called with
        "utf-8" as its second positional argument (i.e. skipkeys).
        """
        data = await reader.readline()
        message = data.decode().strip()
        operation, entry = message.split(DELIMITER)
        if operation == "get":
            reply = json.dumps(dict(value=self.get(entry)))
            writer.write(reply.encode("utf-8"))
        elif operation == "set":
            k, v = entry.split(" ")
            self.set(k, v)
            writer.write(b"done!\n")
        else:
            writer.write(b"unknown\toperation\n")
        await writer.drain()

    def set(self, key, value):
        """Store value under key."""
        self.map[key] = value

    def get(self, key):
        """Return the stored value, or '' for an unknown key."""
        return self.map[key]
class NodeClientTestSuite(unittest.IsolatedAsyncioTestCase):
    """Async test case exercising NodeClient against DummyServer.

    NOTE(review): the async orchestration in test_set is broken -- see the
    inline notes. The test as written cannot pass as intended.
    """
    def __init__(self, *args, **kwargs):
        super(NodeClientTestSuite, self).__init__(*args, **kwargs)
        self.id = uuid.uuid1()
        self.server_process = None

    def setUp(self):
        self.client = NodeClient("127.0.0.1", 4040, self.id)
        self.server = DummyServer("127.0.0.1", 4040)

    def tearDown(self):
        self.client.disconnect()

    async def test_set(self):
        loop = asyncio.get_event_loop()
        tasks = []
        # NOTE(review): this line is broken several ways -- `await
        # self.server.serve()` blocks here forever; its (None) result is then
        # passed to asyncio.run, which fails inside a running loop; and
        # create_task expects a coroutine, not the result of run_in_executor.
        tasks.append(asyncio.create_task(await loop.run_in_executor(None, asyncio.run(await self.server.serve()))))

        async def assert_val():
            await self.client.set("yash", 2)
            data = await self.client.get("yash")
            # NOTE(review): the decoded value is discarded, and `data`
            # (still bytes, set to 2 above) is compared against 1.
            data.decode().strip()
            self.assertEqual(data, 1)

        # NOTE(review): assert_val is appended without being called, so
        # create_task receives a function, not a coroutine.
        tasks.append(asyncio.create_task(assert_val))
        tasks.append(self.server.shutdown())
        # NOTE(review): asyncio.wait(...) is a coroutine and is never awaited.
        asyncio.wait(tasks)

    # def test_get(self):
    #     pass

    # def test_join(self):
    #     pass

    # def test_vote(self):
    #     pass
if __name__ == "__main__":
    # Manual smoke test: run the dummy server standalone on port 4041 (so it
    # does not clash with the unittest server on 4040).
    async def runner():
        # NOTE: shadows the module-level runner() helper.
        srv = DummyServer('127.0.0.1', 4041)
        await srv.serve()
    asyncio.run(runner())
| 2.375 | 2 |
open_world/data.py | khalilbalaree/OWE-sBert | 0 | 12771733 | <reponame>khalilbalaree/OWE-sBert<gh_stars>0
import pickle
import os
from tqdm import tqdm
import numpy as np
from sentence_transformers import SentenceTransformer
# from name_entity_recognition import spacy_filter
def load_model(model='transe'):
    """Load pre-trained KGE embeddings from disk.

    Returns (entities, relations).  For 'complex' each element is a
    [real_part, imaginary_part] pair of pickled arrays.
    """
    def _unpickle(path):
        with open(path, 'rb') as handle:
            return pickle.load(handle)

    if model == 'transe':
        e = _unpickle('./openke_models/transe300/entities.p')
        r = _unpickle('./openke_models/transe300/relations.p')
    elif model == 'complex':
        e = [_unpickle('./openke_models/complex300/entities_r.p'),
             _unpickle('./openke_models/complex300/entities_i.p')]
        r = [_unpickle('./openke_models/complex300/relations_r.p'),
             _unpickle('./openke_models/complex300/relations_i.p')]
    else:
        exit('Not support yet!')
    return e, r
def load_entities():
    """Map entity id (str) -> entity name, read from entity2id.txt."""
    entities = {}
    with open('./dbpedia50/entity2id.txt', 'r') as ef:
        for line in ef.readlines()[1:]:  # first line is the record count
            fields = line.strip().split('\t')
            entities[fields[1].strip()] = fields[0].strip()
    return entities
def load_relations():
    """Map relation id (str) -> relation name, read from relation2id.txt."""
    relations = {}
    with open('./dbpedia50/relation2id.txt', 'r') as ef:
        for line in ef.readlines()[1:]:  # first line is the record count
            fields = line.strip().split('\t')
            relations[fields[1].strip()] = fields[0].strip()
    return relations
def load_descriptions():
    """Map entity name -> textual description, read from descriptions.txt."""
    descriptions = {}
    with open('./dbpedia50/descriptions.txt', 'r') as f:
        for line in f:
            fields = line.strip().split('\t')
            # column layout: name <TAB> (ignored) <TAB> description
            descriptions[fields[0].strip()] = fields[2].strip()
    return descriptions
def load_data(device, cut=20000):
    """Encode every entity description with SBERT, paired with its TransE
    embedding, caching the arrays under ./npdave/.

    Returns (x_train, y_train, x_test, y_test) split at index *cut*.
    """
    e, _ = load_model()
    entities = load_entities()
    descriptions = load_descriptions()
    print('device: %s' % device)
    # model = SentenceTransformer('average_word_embeddings_glove.6B.300d', device=device)
    model = SentenceTransformer('distilbert-base-nli-mean-tokens', device=device)
    print("Transforming descriptions using sbert...")
    if not os.path.exists('./npdave/x.npy') or not os.path.exists('./npdave/y.npy'):
        x = []
        y = []
        for i in tqdm(range(len(e))):
            name = entities[str(i)]
            x.append(model.encode(descriptions[name]))
            y.append(np.array(e[i]))
        # np.float was removed in NumPy >= 1.24; builtin float is equivalent
        x = np.array(x).astype(float)
        y = np.array(y).astype(float)
        np.save('./npdave/x.npy', x)
        # BUG FIX: the original saved *x* to './npsave/y.npy' (wrong array
        # AND a directory different from the one loaded below), so the
        # cached labels never existed/matched.
        np.save('./npdave/y.npy', y)
        print("Transformation done!")
    else:
        x = np.load('./npdave/x.npy')
        y = np.load('./npdave/y.npy')
    return x[:cut], y[:cut], x[cut:], y[cut:]
def load_data_complex(device, cut=20000):
    """Encode descriptions with SBERT, paired with ComplEx embeddings
    (real and imaginary parts).

    Returns (x_train, yr_train, yi_train, x_test, yr_test, yi_test)
    split at index *cut*.
    """
    e, _ = load_model('complex')
    entities = load_entities()
    descriptions = load_descriptions()
    print('device: %s' % device)
    model = SentenceTransformer('distilbert-base-nli-mean-tokens', device=device)
    print("Transforming descriptions using sbert...")
    x = []
    yr = []
    yi = []
    for i in tqdm(range(len(e[0]))):
        name = entities[str(i)]
        x.append(model.encode(descriptions[name]))
        yr.append(np.array(e[0][i]))  # real part
        yi.append(np.array(e[1][i]))  # imaginary part
    # BUG FIX: np.float was removed in NumPy >= 1.24; builtin float is equivalent
    x = np.array(x).astype(float)
    yr = np.array(yr).astype(float)
    yi = np.array(yi).astype(float)
    print("Transformation done!")
    return x[:cut], yr[:cut], yi[:cut], x[cut:], yr[cut:], yi[cut:]
def relation_tail_train():
    """Collect 'relation:tail' id pairs observed in the training triples."""
    pairs = []
    with open('./dbpedia50/train2id.txt', 'r') as ft:
        for line in ft:
            fields = line.strip().split('\t')
            # columns: head, tail, relation
            pairs.append(fields[2].strip() + ':' + fields[1].strip())
    return tuple(pairs)
# def filter_open_word_test(deep_filtered=False):
# entities = load_entities()
# relations = load_relations()
# if deep_filtered:
# r_t = relation_tail_train()
# else:
# r_t = ()
# with open('./open_world_dbpedia50/test_tail_open_converted.txt','r') as ef:
# data = ef.readlines()
# with open('./open_world_dbpedia50/test_tail_open_converted_filtered.txt', 'w') as wf:
# for line in data:
# e = line.strip().split('\t')
# h = e[0].strip()
# t = e[1].strip()
# r = e[2].strip()
# if deep_filtered:
# if r+':'+t in r_t:
# wf.write(h+'\t'+t+'\t'+r+'\n')
# else:
# if t in entities.values() and r in relations.values():
# wf.write(h+'\t'+t+'\t'+r+'\n')
def load_open_word_test(device, deep_filtered=False):
    """Encode open-world test heads with SBERT.

    Returns (head_embeddings, tail_ids, relation_ids, head_names) for each
    triple in test_tail_open_converted.txt.
    """
    # invert the id -> name maps so names can be looked up
    entities = {name: _id for _id, name in load_entities().items()}
    relations = {name: _id for _id, name in load_relations().items()}
    descs = load_descriptions()
    model = SentenceTransformer('distilbert-base-nli-mean-tokens', device=device)
    hs_name, hs, ts, rs = [], [], [], []
    print("Transforming descriptions using sbert...")
    with open('./open_world_dbpedia50/test_tail_open_converted.txt', 'r') as f:
        data = f.readlines()
    for line in tqdm(data):
        fields = line.strip().split('\t')
        head = fields[0].strip()
        tail = fields[1].strip()
        rel = fields[2].strip()
        hs.append(model.encode(descs[head]))
        hs_name.append(head)
        ts.append(entities[tail])
        rs.append(relations[rel])
    print("Transformation done!")
    return hs, ts, rs, hs_name
def target_filter():
    """Group known tail ids by 'head_name:relation_id' over the open-world
    test triples (used to filter known answers during ranking)."""
    relations = {name: _id for _id, name in load_relations().items()}
    entities = {name: _id for _id, name in load_entities().items()}
    filter_list = {}
    with open('./open_world_dbpedia50/test_tail_open_converted.txt', 'r') as ft:
        for line in ft:
            fields = line.strip().split('\t')
            head = fields[0].strip()
            tail = entities[fields[1].strip()]
            rel = relations[fields[2].strip()]
            filter_list.setdefault(head + ':' + rel, []).append(tail)
    return filter_list
def load_data_for_bert(cut=20000):
    """Return raw (truncated) descriptions with TransE embedding targets.

    Returns (x_train, y_train, x_test, y_test) split at index *cut*; x
    holds plain text strings, y a float ndarray of embeddings.
    """
    e, _ = load_model()
    entities = load_entities()
    descriptions = load_descriptions()
    x = []
    y = []
    for i in range(len(e)):
        name = entities[str(i)]
        # cut description, else too intensive for local hardware
        x.append(descriptions[name][:500])
        y.append(np.array(e[i]))
    # BUG FIX: np.float was removed in NumPy >= 1.24; builtin float is equivalent
    y = np.array(y).astype(float)
    return x[:cut], y[:cut], x[cut:], y[cut:]
| 2.375 | 2 |
plugins/tff_backend/tff_backend_plugin.py | threefoldfoundation/app_backend | 0 | 12771734 | <reponame>threefoldfoundation/app_backend
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
from framework.bizz.authentication import get_current_session
from framework.plugin_loader import get_plugin, BrandingPlugin
from framework.utils.plugins import Handler, Module
from mcfw.consts import AUTHENTICATED, NOT_AUTHENTICATED
from mcfw.restapi import rest_functions, register_postcall_hook
from mcfw.rpc import parse_complex_value
from plugins.rogerthat_api.rogerthat_api_plugin import RogerthatApiPlugin
from plugins.tff_backend import rogerthat_callbacks
from plugins.tff_backend.api import investor, nodes, global_stats, users, audit, agenda, flow_statistics, \
installations, nodes_unauthenticated
from plugins.tff_backend.bizz.authentication import get_permissions_from_scopes, get_permission_strings, Roles
from plugins.tff_backend.bizz.statistics import log_restapi_call_result
from plugins.tff_backend.configuration import TffConfiguration
from plugins.tff_backend.handlers.cron import RebuildSyncedRolesHandler, UpdateGlobalStatsHandler, \
SaveNodeStatusesHandler, BackupHandler, CheckNodesOnlineHandler, ExpiredEventsHandler, RebuildFirebaseHandler, \
CheckOfflineNodesHandler, CheckStuckFlowsHandler
from plugins.tff_backend.handlers.index import IndexPageHandler
from plugins.tff_backend.handlers.testing import AgreementsTestingPageHandler
from plugins.tff_backend.handlers.update_app import UpdateAppPageHandler
from plugins.tff_backend.patch_onfido_lib import patch_onfido_lib
class TffBackendPlugin(BrandingPlugin):
    """ThreeFold backend plugin.

    Wires Rogerthat API callbacks, HTTP/REST handlers, admin cron jobs and
    the per-role dashboard modules into the framework.
    """

    def __init__(self, configuration):
        super(TffBackendPlugin, self).__init__(configuration)
        self.configuration = parse_complex_value(TffConfiguration, configuration, False)  # type: TffConfiguration
        rogerthat_api_plugin = get_plugin('rogerthat_api')
        assert (isinstance(rogerthat_api_plugin, RogerthatApiPlugin))
        # Register every Rogerthat event this backend reacts to.
        rogerthat_api_plugin.subscribe('app.installation_progress', rogerthat_callbacks.installation_progress)
        rogerthat_api_plugin.subscribe('messaging.flow_member_result', rogerthat_callbacks.flow_member_result)
        rogerthat_api_plugin.subscribe('messaging.form_update', rogerthat_callbacks.form_update)
        rogerthat_api_plugin.subscribe('messaging.update', rogerthat_callbacks.messaging_update)
        rogerthat_api_plugin.subscribe('messaging.poke', rogerthat_callbacks.messaging_poke)
        rogerthat_api_plugin.subscribe('friend.is_in_roles', rogerthat_callbacks.friend_is_in_roles)
        rogerthat_api_plugin.subscribe('friend.update', rogerthat_callbacks.friend_update)
        rogerthat_api_plugin.subscribe('friend.invite_result', rogerthat_callbacks.friend_invite_result)
        rogerthat_api_plugin.subscribe('friend.register_result', rogerthat_callbacks.friend_register_result)
        rogerthat_api_plugin.subscribe('system.api_call', rogerthat_callbacks.system_api_call)
        patch_onfido_lib()
        # Record the result of every REST API call for statistics.
        register_postcall_hook(log_restapi_call_result)

    def get_handlers(self, auth):
        """Yield URL handlers.

        Page handlers and REST modules are always exposed; the cron
        endpoints are only yielded for AUTH_ADMIN.
        """
        yield Handler(url='/', handler=IndexPageHandler)
        yield Handler(url='/update-app', handler=UpdateAppPageHandler)
        yield Handler(url='/testing/agreements', handler=AgreementsTestingPageHandler)
        # REST modules requiring an authenticated session
        authenticated_handlers = [nodes, investor, global_stats, users, audit, agenda, flow_statistics, installations]
        for _module in authenticated_handlers:
            for url, handler in rest_functions(_module, authentication=AUTHENTICATED):
                yield Handler(url=url, handler=handler)
        # REST modules that are publicly reachable
        not_authenticated_handlers = [nodes_unauthenticated]
        for _module in not_authenticated_handlers:
            for url, handler in rest_functions(_module, authentication=NOT_AUTHENTICATED):
                yield Handler(url=url, handler=handler)
        if auth == Handler.AUTH_ADMIN:
            yield Handler(url='/admin/cron/tff_backend/backup', handler=BackupHandler)
            yield Handler(url='/admin/cron/tff_backend/rebuild_synced_roles', handler=RebuildSyncedRolesHandler)
            yield Handler(url='/admin/cron/tff_backend/global_stats', handler=UpdateGlobalStatsHandler)
            yield Handler(url='/admin/cron/tff_backend/check_nodes_online', handler=CheckNodesOnlineHandler)
            yield Handler(url='/admin/cron/tff_backend/check_offline_nodes', handler=CheckOfflineNodesHandler)
            yield Handler(url='/admin/cron/tff_backend/save_node_statuses', handler=SaveNodeStatusesHandler)
            yield Handler(url='/admin/cron/tff_backend/events/expired', handler=ExpiredEventsHandler)
            yield Handler(url='/admin/cron/tff_backend/check_stuck_flows', handler=CheckStuckFlowsHandler)
            yield Handler(url='/admin/cron/tff_backend/rebuild_firebase', handler=RebuildFirebaseHandler)

    def get_client_routes(self):
        """Client-side (SPA) routes that the server should let through."""
        return ['/orders<route:.*>', '/node-orders<route:.*>', '/investment-agreements<route:.*>',
                '/global-stats<route:.*>', '/users<route:.*>', '/agenda<route:.*>', '/flow-statistics<route:.*>',
                '/installations<route:.*>', '/dashboard<route:.*>', '/nodes<route:.*>']

    def get_modules(self):
        """Yield dashboard modules visible to the current session's roles."""
        perms = get_permissions_from_scopes(get_current_session().scopes)
        is_admin = Roles.BACKEND_ADMIN in perms or Roles.BACKEND in perms
        yield Module(u'tff_dashboard', [], 0)
        if is_admin or Roles.BACKEND_READONLY in perms:
            yield Module(u'tff_orders', [], 1)
            yield Module(u'tff_global_stats', [], 3)
            yield Module(u'tff_users', [], 4)
            yield Module(u'tff_agenda', [], 5)
            yield Module(u'tff_flow_statistics', [], 6)
            yield Module(u'tff_installations', [], 7)
        # the nodes module is visible to any node-related role
        for role in [Roles.BACKEND_READONLY, Roles.NODES, Roles.NODES_READONLY, Roles.NODES_ADMIN]:
            if is_admin or role in perms:
                yield Module(u'tff_nodes', [], 8)
                break
        if is_admin:
            yield Module(u'tff_investment_agreements', [], 2)

    def get_permissions(self):
        """Return the permission strings derived from the session scopes."""
        return get_permission_strings(get_current_session().scopes)
| 0.867188 | 1 |
section4/scikit_learn_pipeline/predict.py | Jeffresh/deployment-of-machine-learning-models | 0 | 12771735 | <reponame>Jeffresh/deployment-of-machine-learning-models<filename>section4/scikit_learn_pipeline/predict.py
import pandas as pd
import joblib
import config
def make_prediction(input_data):
    """Run the persisted price pipeline on *input_data* and return predictions."""
    pipeline = joblib.load(filename=config.PIPELINE_NAME)
    return pipeline.predict(input_data)
return results
if __name__ == '__main__':
    # Smoke-test the persisted pipeline against a held-out split.
    import numpy as np
    from sklearn.model_selection import train_test_split
    from sklearn.metrics import mean_squared_error, r2_score

    data = pd.read_csv(config.TRAINING_DATA_FILE)
    X_train, X_test, y_train, y_test = train_test_split(
        data[config.FEATURES],
        data[config.TARGET],
        train_size=0.1,
        random_state=0)

    pred = make_prediction(X_test)

    # predictions are exponentiated before scoring -- presumably the
    # pipeline is trained on the log of the target; confirm in train code
    mse = mean_squared_error(y_test, np.exp(pred))
    print('test mse: {}'.format(int(mse)))
    print('test rmse: {}'.format(int(np.sqrt(mse))))
    print('test r2: {}'.format(r2_score(y_test, np.exp(pred))))
| 3.1875 | 3 |
showminder/showminder/settings.py | chassing/showminder | 1 | 12771736 | <gh_stars>1-10
from pathlib import Path
import environ
env = environ.Env(DEBUG=(bool, False))
env.read_env()

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# SECURITY WARNING: keep the secret key used in production secret!
# BUG FIX: the original literal was missing its closing quote (a
# SyntaxError); read the key from the environment with the old
# placeholder as default so deployments can override it safely.
SECRET_KEY = env("SECRET_KEY", default="<KEY>")

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DEBUG")

ALLOWED_HOSTS = ["*"]
# Application definition

INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "bootstrap5",
    # project apps
    "api",
    "frontend",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    # WhiteNoise serves the compressed static files (see STATICFILES_STORAGE)
    "whitenoise.middleware.WhiteNoiseMiddleware",
]

ROOT_URLCONF = "showminder.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [BASE_DIR / "templates"],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                # injects the app version into every template context
                "showminder.context_processors.version",
            ]
        },
    }
]

WSGI_APPLICATION = "showminder.wsgi.application"

# PostgreSQL connection; credentials come from the environment in production
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "showminder",
        "USER": env("DB_USER", default="user"),
        "PASSWORD": env("DB_PASSWORD", default="password"),
        "HOST": env("DB_HOST", default="postgres"),
        "PORT": "5432",
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files, served compressed + hashed via WhiteNoise
STATIC_ROOT = "/tmp/staticfiles"
STATIC_URL = "/static/"
STATICFILES_DIRS = [BASE_DIR / "static"]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"

# login
LOGIN_REDIRECT_URL = "/"

# session timeout - 200 weeks
SESSION_COOKIE_AGE = 1209600 * 100

# Title aliases mapping scraped names onto canonical show titles, e.g.:
# MAP_TITLES = {
#     "s w a t": "swat",
#     "greys anatomy": "grey's anatomy",
#     "911": "9-1-1",
#     "ncis los angeles": "ncis: los angeles",
#     "ncis new orleans": "ncis: new orleans",
# }
MAP_TITLES = env("MAP_TITLES", default={})

DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"

# The Movie Database API access
TMDB_API_KEY = env("TMDB_API_KEY", default="")
TMDB_BASE_URL = "https://image.tmdb.org/t/p/w500/"
flask_value_checker/restrictions/rtypes/file.py | therealadityashankar/flask-value-checker | 1 | 12771737 | from ..generic_restriction import GenericRestriction
from flask import request
class FileRestriction(GenericRestriction):
    """Restriction on a file field of the incoming Flask request.

    The field is required unless the "optional" attribute was compiled in.
    """

    type_keyword = "file"
    attributes = {
        "optional": {},
        "number": {"parameters": [{"type": int}]},
    }

    def __init_restriction__(self):
        # files are mandatory by default
        self.optional = False

    def compile_restriction(self, name: str, vals: list):
        if name == "optional":
            self.optional = True

    def check_for(self, _):
        """Return (ok, error_message) for the current request."""
        if self.optional or self.parameter in request.files:
            return True, None
        return False, f"file '{self.parameter}' is missing !"
| 2.78125 | 3 |
SeqGAN/seqgan_train.py | rickyHong/GANs-Repository-repl | 3 | 12771738 | <reponame>rickyHong/GANs-Repository-repl
# initial python file
| 0.742188 | 1 |
tools/plot/PlotHelper.py | shenweihai1/veribetrkv-linear | 0 | 12771739 | #!/usr/bin/env python3
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import sys
import operator
import bisect
import os
class Scale:
    """A unit prefix (e.g. "K", "Gi") paired with its multiplier.

    Calling the instance yields the multiplier as a float; repr() yields
    the prefix, so a Scale can be interpolated straight into axis labels.
    """

    def __init__(self, prefix, mult):
        self.mult = float(mult)
        self.prefix = prefix

    def __repr__(self):
        return self.prefix

    def __call__(self):
        return self.mult
# Common scale prefixes used throughout the plotting helpers.
Unit = Scale("", 1)       # no scaling
K = Scale("K", 1000)      # decimal kilo
Ki = Scale("Ki", 1024)    # binary kibi
Mi = Scale("Mi", 1<<20)   # binary mebi
Gi = Scale("Gi", 1<<30)   # binary gibi
G = Scale("G", 1e9)       # decimal giga
class PlotHelper:
    """Lays out a grid of matplotlib subplots.

    Axes are handed out column-major via nextAxis(); save() writes the
    assembled figure to disk.
    """

    def __init__(self, numPlots, scale=1, columns=None):
        self.numPlots = numPlots
        if columns:
            self.columns = columns
        else:
            # switch to two columns once there are more than four plots
            self.columns = 2 if numPlots > 4 else 1
        # NOTE(review): this rounds *down* for odd counts (e.g. 5 plots in
        # 2 columns -> 2 rows, only 4 slots); confirm callers never request
        # more axes than rows*columns.
        self.rows = int((numPlots+0.5)/self.columns)
        # You may need: sudo pip3 install --upgrade matplotlib
        self.fig = plt.figure(#constrained_layout=True,
                figsize = (scale*7*self.columns, scale*self.rows*2))
        self.gridspec = GridSpec(self.rows, self.columns)
        #self.fig, self.axes = plt.subplots(rows, columns, figsize=())
        #self.axes = self.axes.transpose().flatten()
        plt.subplots_adjust(left=0.06, right=0.94, hspace=0.6, top=0.95, bottom=0.05);
        self.nextAxisSlot = 0  # index of the next free grid slot

    def nextAxis(self, depth=1):
        """Return the next free axis, spanning *depth* grid rows (column-major)."""
        startSpot = self.nextAxisSlot
        self.nextAxisSlot += depth
        # convert the flat slot index into (row, col), filling columns first
        col = int(startSpot / self.rows)
        row = int(startSpot % self.rows)
        endRow = row + depth
        return self.fig.add_subplot(self.gridspec[row:endRow, col])

    def save(self, figname):
        """Write the assembled figure to *figname*."""
        #plt.tight_layout()
        plt.savefig(figname)
class LambdaTrace:
    """Adapts a callable to the trace interface: indexable by opn and
    carrying a .units string."""

    def __init__(self, lam, units):
        self.units = units
        self.lam = lam

    def __getitem__(self, opn):
        # delegate straight to the wrapped callable
        return self.lam(opn)
class StackedTraces:
    """A trace whose value at each opn is the sum over several sub-traces."""

    def __init__(self, traces):
        self.traces = traces
        # all stacked traces are assumed to share the first trace's units
        self.units = traces[0].units

    def __getitem__(self, opn):
        total = 0
        for trace in self.traces:
            total += trace[opn]
        return total
def plotVsKop(ax, exp, lam, debug=False):
    """Sample lam(opn) at every recorded opn of experiment *exp*.

    ax: which axis to apply the x-label to ("op num (K)").
    lam(opn): compute a y value for a given opn value.
    Returns (xs, ys) suitable to be passed to plt.plot.  Points where lam
    returns None or raises KeyError/IndexError are silently dropped
    (re-raised when debug=True).
    """
    ax.set_xlabel("op num (K)")
    ax.set_xlim(left = 0, right=exp.op_max/K())
    xs = []
    ys = []
    for opn in exp.sortedOpns:
        try:
            x = opn/K()
            y = lam(opn)
            if x!=None and y != None:
                xs.append(x)
                ys.append(y)
            elif debug:
                print (x, y)
        except KeyError:
            # trace undefined at this opn
            if debug: raise
            else: pass
        except IndexError:
            if debug: raise
            else: pass
    assert None not in xs
    assert None not in ys
    return xs,ys
def windowedPair(ax, num_trace, denom_trace, scale=Unit, window=100*K()):
    """Build a lam for plotVsKop giving a windowed rate num/denom.

    The value at opn is
    (num[opn]-num[opn-window]) / scale / (denom[opn]-denom[opn-window]),
    or None when either trace is undefined at a window endpoint or the
    denominator delta is zero.  Also sets the y-label to
    "<scale><num units>/<denom units>".
    """
    ax.set_ylabel("%s%s/%s" % (scale, num_trace.units, denom_trace.units))
    def val(opn):
        opnBefore = opn - window
        #if opnBefore < 0: return None
        try:
            num = num_trace[opn] - num_trace[opnBefore]
            denom = denom_trace[opn] - denom_trace[opnBefore]
        except TypeError: # None because some opn isn't defined
            return None
        if denom == 0:
            return None
        rate = num/scale()/denom
        return rate
    return val
def singleTrace(ax, trace, scale=Unit):
    """Build a lam for plotVsKop giving trace values divided by *scale*.

    Sets the y-label to "<scale><trace units>"; returns None where the
    trace is undefined.
    """
    ax.set_ylabel("%s%s" % (scale, trace.units))
    def lam(opn):
        try:
            return trace[opn]/scale()
        except TypeError: # None because trace undefined at opn
            return None
    return lam
def set_xlim(ax, experiments):
    """Stretch the x axis to cover the longest experiment, in Kops."""
    right = max((exp.op_max / K() for exp in experiments), default=0)
    ax.set_xlim(left=0, right=right)
# Resistor color code ordering (unused by spectrum(), kept for reference).
resistor_spectrum_ = ["black", "brown", "red", "orange", "green", "indigo", "blue", "violet"]
# same colors as in the aws automation console
spectrum_ = ["red", "yellow", "green", "cyan", "blue", "magenta",
             "#800000", "#808000", "#008000", "#008080", "#000080", "#800080"]


def spectrum(idx):
    """Return a distinct plot color for experiment *idx*, cycling when exhausted."""
    palette = spectrum_
    return palette[idx % len(palette)]
def plotThroughput(ax, experiments):
    """Plot windowed op throughput (Kops, log y) for every experiment,
    elapsed time on a twin axis, and phase labels from the first experiment."""
    ax.set_title("op throughput")
    a2 = ax.twinx()
    a2.set_ylabel("s")
    for expi in range(len(experiments)):
        exp = experiments[expi]
        # solid: default (100 Kop) window; dotted: smoother 1000 Kop window
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.operation, exp.elapsed, scale=K)), color=spectrum(expi))
        line.set_label(exp.nickname + " tput")
        ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.operation, exp.elapsed, window=1000*K(), scale=K)), color=spectrum(expi), linestyle="dotted")
        def elapsedTime(opn):
            return exp.elapsed[opn]
        line, = a2.plot(*plotVsKop(ax, exp, elapsedTime), color=spectrum(expi))
        line.set_label(exp.nickname + " rate")
    ax.legend(loc="upper left")
    ax.set_yscale("log")
    ax.set_ylim(bottom=0.1)
    ax.grid(which="major", color="black")
    ax.grid(which="minor", color="#dddddd")
    set_xlim(ax, experiments)
    a2.legend(loc="lower left")
    # annotate phase boundaries using the first experiment only
    for exp in experiments[:1]:
        for phase,opn in exp.phase_starts.items():
            #print (phase,opn,opn/K())
            ax.text(opn/K(), ax.get_ylim()[0], phase)
def plotManyForeach(ax, experiments, plotOneFunc):
    """Invoke plotOneFunc(exp, plotkwargs) for each experiment, assigning
    each its spectrum color."""
    for idx, exp in enumerate(experiments):
        plotOneFunc(exp, {"color": spectrum(idx)})
def plotMany(ax, experiments, plotOneFunc):
    """plotManyForeach plus the standard op-x-axis adjustments and legend."""
    plotManyForeach(ax, experiments, plotOneFunc)
    ax.set_ylim(bottom=0)
    set_xlim(ax, experiments)
    ax.legend()
def plotGrandUnifiedMemory(ax, experiments):
    """Overlay every memory metric (OS, cgroups, jemalloc, malloc and
    internal accumulators) for each experiment.

    Colors distinguish experiments, linestyles cycle per metric; only the
    first experiment's metrics (or always=True ones) get legend labels.
    """
    ax.set_title("Grand Unified Memory")
    linestyles=["solid", "dashed", "dotted", "-."]
    coloridx = [0]  # mutable cell so the closure below can advance the color
    def plotOneExp(exp, plotkwargs):
        labelidx = [0]  # per-experiment linestyle cursor
        plotkwargs["color"] = spectrum(coloridx[0])
        is_first_exp = coloridx[0]==0
        coloridx[0] += 1
        def plotWithLabel(lam, exp_nick, lbl, always=False):
            plotkwargs["linestyle"] = linestyles[labelidx[0] % len(linestyles)]
            #print("using color %s for label %s" % (plotkwargs["color"], lbl))
            labelidx[0] += 1
            xs,ys = plotVsKop(ax, exp, lam)
            if len(xs)==0:
                # don't clutter legendspace
                return
            line, = ax.plot(xs, ys, **plotkwargs)
            if is_first_exp or always:
                # label includes the final sample, formatted in GiB
                line.set_label(exp_nick + lbl + (" %.2f%sB" % (ys[-1], Gi.prefix)))

        plotWithLabel(singleTrace(ax, exp.os_map_total, scale=Gi),
                exp.nickname, " OS mem")
        # plotWithLabel(singleTrace(ax, exp.os_map_heap, scale=Gi),
        #         exp.nickname, " OS heap")
        plotWithLabel(singleTrace(ax, exp.cgroups_memory_usage_bytes, scale=Gi),
                exp.nickname, " cgroups-usage", always=True)

        # malloc & jemalloc
        plotWithLabel(singleTrace(ax, exp.jem_mapped, scale=Gi),
                exp.nickname, " jem mapped")
        # plotWithLabel(singleTrace(ax, exp.jem_active, scale=Gi),
        #         exp.nickname, " jem active")
        plotWithLabel(singleTrace(ax, exp.jem_allocated, scale=Gi),
                exp.nickname, " jem alloc")
        # fall back to a None-producing lam when the malloc microscope is absent
        mallocLam = singleTrace(ax, exp.microscopes["total"].getTrace("open_byte"), scale=Gi) if "total" in exp.microscopes else lambda opn: None
        plotWithLabel(mallocLam, exp.nickname, " malloc")

        # "underlying" view: measured in C++ below Dafny but above malloc
        plotWithLabel(singleTrace(ax, exp.kvl_underlying, scale=Gi),
                exp.nickname, " underlying")

        # internal views, stacked
        traceNames = ["bucket-message-bytes", "bucket-key-bytes", "pivot-key-bytes"]
        def StackFor(count):
            return [exp.accum[n] for n in traceNames[:count+1]]
        # Just plot the sum of internal stuff
        try:
            stackedTraces = StackedTraces(StackFor(len(traceNames)))
            plotWithLabel(singleTrace(ax, stackedTraces, scale=Gi),
                    exp.nickname, " internal-accum-bytes", always=True)
        except: pass  # accum traces may be absent for some experiments

    for i in range(len(experiments)):
        exp = experiments[i]
        plotOneExp(exp, {"linestyle": linestyles[i % len(linestyles)]})
    ax.legend()
    set_xlim(ax, experiments)
def plotRocksIo(ax, experiments):
    """Plot RocksDB block-cache hit ratio and pages missed per op.

    Solid lines use the narrow window, dotted lines a 10-100x wider one.
    """
    ax.set_title("rocks io")
    window = 10*K()
    def plotOneExp(exp, plotkwargs):
        # NOTE(review): hit_ratio is computed but never used below.
        hit_ratio = LambdaTrace(lambda opn: exp.rocks_io_hits[opn]/exp.rocks_io_reads[opn], "frac")
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.rocks_io_hits, exp.rocks_io_reads, window=window)), **plotkwargs)
        line.set_label(exp.nickname + " rio_ratio")
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.rocks_io_hits, exp.rocks_io_reads, window=100*window)), linestyle="dotted", **plotkwargs)

        # line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.rocks_io_reads, exp.operation, window=window)))
        # line.set_label("rio_access")
        miss_pages = LambdaTrace(lambda opn: (exp.rocks_io_reads[opn] - exp.rocks_io_hits[opn]), "pages")
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, miss_pages, exp.operation, scale=Unit, window=100*K())), **plotkwargs)
        line.set_label(exp.nickname + " miss_per_opn (%s)" % miss_pages.units)
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, miss_pages, exp.operation, scale=Unit, window=1000*K())), linestyle="dotted", **plotkwargs)
    plotMany(ax, experiments, plotOneExp)
def plotCpuTime(ax, experiments):
    """Plot windowed user (solid) and system (dotted) CPU seconds per
    elapsed second for each experiment."""
    ax.set_title("CPU time")
    def plotOneExp(exp, plotkwargs):
        # convert /proc clock ticks into seconds
        ticksPerSecond = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
        user_sec = LambdaTrace(lambda opn: exp.utime[opn]/ticksPerSecond, "s")
        sys_sec = LambdaTrace(lambda opn: exp.stime[opn]/ticksPerSecond, "s")
        #print("ticksPerSecond", ticksPerSecond)
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, user_sec, exp.elapsed)), **plotkwargs)
        line.set_label(exp.nickname+" user")
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, sys_sec, exp.elapsed)), **plotkwargs, linestyle="dotted")
        line.set_label(exp.nickname+" sys")
    plotMany(ax, experiments, plotOneExp)
def plotProcIoBytes(ax, experiments):
    """Plot /proc io read (solid) and write (dotted) KiB per op, using a
    1000 Kop window."""
    ax.set_title("proc io bytes")
    def plotOneExp(exp, plotkwargs):
        window = 1000*K()
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.procio_read_bytes, exp.operation, scale=Ki, window=window)), **plotkwargs)
        line.set_label(exp.nickname + " read")
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.procio_write_bytes, exp.operation, scale=Ki, window=window)), linestyle="dotted", **plotkwargs)
        line.set_label(exp.nickname + " write")
    plotMany(ax, experiments, plotOneExp)
    ax.grid(which="major", color="#dddddd")
def plotIoLatencyCdf(ax, experiments):
    """Plot read/write IO latency CDFs sampled at a fixed opn, converting
    recorded CPU cycles into milliseconds via an assumed clock rate."""
    ax.set_title("io latency")
    ax.set_yscale("log")
    # retrieve from metadata?  (hard-coded 2.2 GHz assumption for now)
    assumeProcCyclesPerSec = 2.2*G()
    def plotOneExpAt(exp, plotkwargs, opn):
        # read is solid, write is dotted; skip traces with no CDF at opn
        for cdf_src,label,linestyle in (
                (exp.iolatency_read, "read", "-"),
                (exp.iolatency_write, "write", "dotted")):
            cdf = cdf_src[opn]
            if cdf==None: continue
            line, = ax.plot([cycles/assumeProcCyclesPerSec*K() for cycles in cdf.xs], cdf.ys, linestyle=linestyle, **plotkwargs)
            line.set_label("%s %s @%dKop" % (exp.nickname, label, opn/K()))
    def plotOneExp(exp, plotkwargs):
        try: pass #print(exp.nickname, exp.iolatency_read.sortedKeys())
        except: pass
        # plotOneExpAt(exp, plotkwargs, 500000)
        #print(plotkwargs)
        # sample point chosen manually; earlier alternatives left commented
        plotOneExpAt(exp, plotkwargs, 8000000)
        # plotOneExpAt(exp, plotkwargs, 2700000)
    plotManyForeach(ax, experiments, plotOneExp)
    ax.set_xlabel("ms assuming clock %.1f%sHz" % (assumeProcCyclesPerSec/G(), G))
    ax.legend()
def plotSlowIos(ax, experiments):
    """Plot the fraction of reads (solid) / writes (dotted) that exceeded
    the slow-IO threshold, windowed over 10 Kops."""
    # collect the distinct threshold traces so the title can show the value
    threshTraces = set()
    for exp in experiments:
        try: threshTraces.add(exp.slow_thresh)
        except IndexError: pass
    # NOTE(review): this try/except just re-raises; it's a no-op wrapper.
    try:
        threshValues = set([t[t.sortedKeys()[0]] for t in threshTraces if not t.empty()])
        descr = str(list(threshValues)[0]) if len(threshValues)==1 else str(threshValues)
        ax.set_title("slow ios (thresh %s %s)" % (
            descr, list(threshTraces)[0].units))
    except: raise
    window = 10*K()
    def plotOneExp(exp, plotkwargs):
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.slow_reads, exp.operation, window=window)), **plotkwargs)
        # NOTE(review): leftover debug print below.
        print(exp.nickname, len(exp.slow_reads.data))
        if not exp.slow_reads.empty():
            line.set_label(exp.nickname + " reads")
        line, = ax.plot(*plotVsKop(ax, exp, windowedPair(ax, exp.slow_writes, exp.operation, window=window)), linestyle="dotted", **plotkwargs)
        if not exp.slow_writes.empty():
            line.set_label(exp.nickname + " writes")
    plotManyForeach(ax, experiments, plotOneExp)
    ax.legend()
    ax.grid(which="major", color="#dddddd")
    set_xlim(ax, experiments)
    ax.set_ylim(top=1)
def plotCacheStats(ax, experiments):
    """Plot writeback stall counts per op (10 Kop window) per experiment."""
    def plotOneExp(exp, plotkwargs):
        stall_rate = windowedPair(ax, exp.writeback_stalls, exp.operation, window=10 * K())
        line, = ax.plot(*plotVsKop(ax, exp, stall_rate), **plotkwargs)
        line.set_label(exp.nickname + " stalls")

    plotManyForeach(ax, experiments, plotOneExp)
    ax.legend()
    ax.grid(which="major", color="#dddddd")
    set_xlim(ax, experiments)
| 2.734375 | 3 |
include/createMap.py | sahibdhanjal/DeepLocNet | 27 | 12771740 | <reponame>sahibdhanjal/DeepLocNet<gh_stars>10-100
import numpy as np
from pylayers.gis.layout import Layout
from pylayers.antprop.coverage import *
from pylayers.simul.link import *
from mayavi import mlab
from pdb import set_trace as bp
from math import sqrt, floor, ceil
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class parseMap:
    """Rasterizes a pylayers Layout into a rows x cols occupancy grid
    (1 = free, 0 = wall) and records the map<->grid transform."""

    def __init__(self, L, rows, cols):
        self.map = np.ones((cols,rows), dtype=int)  # occupancy grid, indexed [y][x]
        self.unit = 0             # metres per grid-diagonal unit
        self.factor = None        # (x, y) scale from map metres to grid cells
        self.min = None           # (minX, minY) map origin offset
        self.bdist = [99999, 99999]  # closest interior wall coordinate to the boundary
        self.parse(L, rows, cols)

    def parse(self, L, rows, cols):
        """Walk every wall segment of L.Gs and mark the covered grid cells
        occupied; radio-transparent materials are skipped."""
        G = L.Gs
        minX, maxX, minY, maxY = L.ax
        dx = maxX-minX; dy = maxY-minY
        # per-axis map->grid scale factors
        factX = rows/dx ; factY = cols/dy
        all = np.array(G.nodes())
        # one grid step along the diagonal, in metres
        self.unit = sqrt((dx**2 + dy**2)/(rows**2 + cols**2))
        self.factor = [factX, factY]
        self.min = [minX, minY]
        # print("grid distance:",rows, cols, " | map distance: ",dx, dy, " | factors :",factX, factY, " | origin shift by: ", minX, minY)
        # in pylayers graphs, negative ids are points, positive ids are segments
        # nodes = [ x for x in all if x<0 ]
        edges = [ x for x in all if x>0 ]
        for i in edges:
            # skip materials that don't block the signal
            # (note: 'PILLLAR' spelling matches the layout's material name --
            #  presumably intentional; confirm against the .ini layout file)
            if 'AIR' in G.node[i]['name'] or 'WOOD' in G.node[i]['name'] or 'PILLLAR' in G.node[i]['name'] or 'GLASS' in G.node[i]['name'] or 'WINDOW' in G.node[i]['name']:
                continue
            v1, v2 = G.node[i]['connect']
            v1x, v1y = G.pos[v1]
            v2x, v2y = G.pos[v2]
            # find where the first real min node is
            if v1x!=minX: self.bdist[0] = min(self.bdist[0], v1x)
            if v2x!=minX: self.bdist[0] = min(self.bdist[0], v2x)
            if v1y!=minY: self.bdist[1] = min(self.bdist[1], v1y)
            if v2y!=minY: self.bdist[1] = min(self.bdist[1], v2y)
            # shift origin
            v1x -= minX ; v1y -= minY
            v2x -= minX ; v2y -= minY
            # transform factor
            v1x *= factX ; v1y *= factY
            v2x *= factX ; v2y *= factY
            # skip segments touching the lower boundary
            if v1x==0 or v2x==0 or v1y ==0 or v2y == 0:
                continue
            # check cases when vertices are the upper boundaries of the image
            if v1x==rows or v2x==rows or v1y == cols or v2y == cols:
                continue
            # rasterize (near-)vertical segments
            if abs(v1x-v2x)<=1:
                rmin = int(min(v1y, v2y))
                rmax = int(max(v1y, v2y))
                x = int(v1x)
                for i in range(rmin, rmax+1):
                    self.map[i][x] = 0
            # rasterize (near-)horizontal segments
            if abs(v1y-v2y)<=1:
                rmin = int(min(v1x, v2x))
                rmax = int(max(v1x, v2x))
                y = int(v1y)
                for i in range(rmin, rmax+1):
                    self.map[y][i] = 0
class createMap:
    """Wrap a pylayers Coverage object: occupancy grid, AP positions and
    RSSI strength maps, plus 2D (matplotlib) and 3D (mayavi) visualisation
    helpers for localization experiments.
    """
    def __init__(self, name = 'test.ini', dim = 2, maxZ = 2):
        self.name = name                        # name of file
        self.map = None                         # 2D grid map with 0 as wall, 1 as free cell
        self.StrengthMap = None                 # map with nAP depth of dBM
        self.Tx = []                            # 3D position of APs
        self.pathUnit = 0                       # pixel/m
        self.numAPs = 0                         # number of APs
        self.C = Coverage(self.name)            # coverage object
        self.maxZ = maxZ                        # max height of blueprint
        self.dim = dim                          # dimension for localization
        self.range = [self.C.nx, self.C.ny]     # grid size [nx, ny]
        self.resolution = 0.5                   # distances at which each map is created
        self.factor = None                      # X conversion of map to grid coordinates
        self.min = None                         # minX factor to be added to AP Loc
        self.DL = DLink(L=self.C.L)             # DLink for 3D plotting
        self.bdist = None                       # boundary distances
        self.getMap()
        self.parseAPs()
    '''
    Parses number and locations of APs
    '''
    def parseAPs(self):
        """Convert every AP position from metric layout coordinates to
        integer grid coordinates and store them in self.Tx."""
        self.numAPs = len(self.C.dap)
        for i in range(self.numAPs):
            # NOTE(review): coordinates are unpacked as (y, x, z) — presumably
            # to match the transposed grid indexing; confirm against Coverage.
            y,x,z = self.C.dap[i]['p']
            x -= self.min[0] ; y -= self.min[1]
            x *= self.factor[0] ; y *= self.factor[1]
            x = int(x) ; y = int(y)
            if self.dim==2 : self.Tx.append((x,y))
            else : self.Tx.append((x,y,z))
    '''
    Parses the map and other relevant parameters
    through the parser class
    '''
    def getMap(self):
        """Rasterize the layout via parseMap and copy its conversion data."""
        parser = parseMap(self.C.L, self.C.nx, self.C.ny)
        self.map = parser.map
        self.pathUnit = parser.unit
        self.min = parser.min
        self.factor = parser.factor
        self.bdist = parser.bdist
    '''
    Refer coverage.py - lines 843 onwards for details
    set ap to -1 to visualize all access points
    otherwise set it to the AP index - [0, nAP]
    '''
    def cover(self):
        """Compute a single 2D received-power map (dBm) at height maxZ."""
        if self.dim == 3:
            raise TypeError("Use cover3D() instead of cover()")
        self.C.zgrid = self.maxZ
        self.C.cover()
        V = self.C.CmWp[0,:,:]
        U = self.reshapeMap(V)
        # Convert milliwatts to dBm.
        self.StrengthMap = 10*np.log10(U)
    '''
    Refer coverage.py - lines 843 onwards for details
    set ap to -1 to visualize all access points
    otherwise set it to the AP index - [0, nAP]
    '''
    def cover3D(self):
        """Compute a stack of 2D power maps at heights 0..maxZ spaced by
        self.resolution; result has shape (nLevels, ny, nx, nAP)."""
        if self.dim == 2:
            raise TypeError("Use cover() instead of cover3D()")
        h = 0
        height = []
        while h<=self.maxZ:
            height.append(h)
            h += self.resolution
        self.StrengthMap = []
        for h in height:
            self.C.zgrid = h
            self.C.cover()
            V = self.C.CmWp[0,:,:]
            U = self.reshapeMap(V)
            self.StrengthMap.append(10*np.log10(U))
            print('Strength Calculated at level: {:.1f}'.format(h), end="\r")
        self.StrengthMap = np.array(self.StrengthMap)
        print()
    '''
    Reshapes from (x*y x 1 x nAP) to (x x y x nAP)
    '''
    def reshapeMap(self, M):
        """Reshape a flattened (nx*ny, nAP) coverage array into (ny, nx, nAP)."""
        _, nAP = M.shape
        x, y = self.C.nx, self.C.ny
        sMap = np.zeros((y,x,nAP))
        for i in range(nAP):
            sMap[:,:,i] = M[:,i].reshape((x,y)).T
        return sMap
    '''
    print AP locations
    '''
    def printAPLocs(self):
        # NOTE(review): assumes dim == 3; for dim == 2 the Tx tuples have only
        # two entries and Tx[i][2] raises IndexError — confirm intended usage.
        for i in range(self.numAPs):
            print("AP ",i,": [",self.Tx[i][0],", ", self.Tx[i][1],", ", self.Tx[i][2], "]")
    '''
    Helper function to visualize all waypoints, access points,
    the ground truth and localized path and the localized APs
    '''
    def visualize(self, start=None, goal=None, wayPts=None, path=None, TX=None, ID=None):
        """2D matplotlib plot of the grid, APs, waypoints, localized path,
        estimated APs and start/goal markers.  Coordinates are (row, col)."""
        if self.dim == 3: raise ValueError("Use visualize3D() instead of visualize() for 3 dimensions")
        print("Displaying Floor Plan.")
        plt.imshow(self.map, cmap="gray")
        Tx = self.Tx
        # display the waypoints by RRT
        if wayPts!=None:
            rows = []; cols = []
            for x,y in wayPts:
                rows.append(x); cols.append(y)
            plt.plot(cols, rows, 'b.-')
        # display the actual AP locations
        if Tx!=None:
            rows = []; cols = []
            ctr = 1
            for i in Tx:
                rows.append(int(i[0])); cols.append(int(i[1]))
                plt.text(i[1],i[0]," AP "+str(ctr), color='black')
                ctr += 1
            plt.plot(cols, rows, 'kx')
        # display the localized path
        if path!=None:
            rows = []; cols = []
            for i in path:
                rows.append(i[0]); cols.append(i[1])
            plt.plot(cols, rows, 'c.-')
        # display the estimated AP locations
        if TX!=None and ID!=None:
            rows = []; cols = []
            ctr = 1
            for i in TX:
                rows.append(int(i[0])); cols.append(int(i[1]))
                plt.text(i[1],i[0]," AP "+str(ID[ctr-1]+1), color='red')
                ctr += 1
            plt.plot(cols, rows, 'rx')
        if start : plt.plot(start[1], start[0], 'gs', markersize=6)
        if goal : plt.plot(goal[1], goal[0], 'rs', markersize=6)
        plt.gca().invert_yaxis()
        plt.show()
    def visualize3D(self, start=None, goal=None, wayPts=None, path=None, TX=None, ID=None):
        """3D mayavi rendering of the layout with APs (yellow), estimated APs
        (cyan), waypoints (white tube), localized path (black tube) and
        start/goal spheres.  Grid coordinates are converted back to metres."""
        print("Displaying Floor Plan.")
        [factX, factY] = self.factor
        [minX, minY] = self.min
        [boundX, boundY] = self.bdist
        Tx = self.Tx
        # display the actual AP locations
        x = []; y = [] ; z = []
        for i in Tx:
            x.append(i[0]/factX + minX)
            y.append(i[1]/factY + minY)
            if self.dim==2: z.append(self.maxZ)
            if self.dim==3: z.append(i[2])
        mlab.points3d(y, x, z, scale_factor=0.4, color=(1.0, 1.0, 0.0))
        # display the estimated AP locations
        if TX!=None and ID!=None:
            if len(TX)==0:
                print("No AP detected")
                pass
            else:
                x = []; y = [] ; z = []
                for i in TX:
                    x.append(i[0]/factX + minX)
                    y.append(i[1]/factY + minY)
                    if self.dim==2: z.append(self.maxZ)
                    if self.dim==3: z.append(i[2])
                mlab.points3d(y, x, z, scale_factor=0.4, color=(0.0, 1.0, 1.0))
        # display the waypoints by RRT
        if wayPts!=None:
            x = []; y = [] ; z = []
            for i in wayPts:
                x.append(i[0]/factX + minX)
                y.append(i[1]/factY + minY)
                if self.dim==2: z.append(0.3)
                if self.dim==3: z.append(i[2])
            mlab.plot3d(y, x, z, tube_radius=0.025, color=(1.0,1.0,1.0))
        # display the localized path
        if path!=None:
            x = []; y = [] ; z = []
            for i in path:
                x.append(i[0]/factX + minX)
                y.append(i[1]/factY + minY)
                if self.dim==2: z.append(0.3)
                if self.dim==3: z.append(i[2])
            mlab.plot3d(y, x, z, tube_radius=0.025, color=(0.0,0.0,0.0))
        if start:
            if self.dim==2: mlab.points3d(start[1]/factY + minY, start[0]/factX + minX, 0.3, scale_factor=0.6, color=(0.0, 1.0, 0.0))
            if self.dim==3: mlab.points3d(start[1]/factY + minY, start[0]/factX + minX, start[2], scale_factor=0.6, color=(0.0, 1.0, 0.0))
        if goal:
            if self.dim==2: mlab.points3d(goal[1]/factY + minY, goal[0]/factX + minX, 0.3, scale_factor=0.6, color=(1.0, 0.0, 0.0))
            if self.dim==3: mlab.points3d(goal[1]/factY + minY, goal[0]/factX + minX, goal[2], scale_factor=0.6, color=(1.0, 0.0, 0.0))
        self.DL._show3(ant=False)
    '''
    visualize only the strength map for one or all
    APs. For combined strength map, use a=-1, else
    use the ID of one of the APs
    '''
    def visualizeSMap(self, ap=-1):
        """Delegate to Coverage.show to plot the signal-strength map."""
        self.C.show(a=ap, figsize=(16,9))
        plt.show()
| 2.609375 | 3 |
03_LearnDSP-Python/Sec05_ExtendDFT/case03_InterpolateDFT.py | iChunyu/signal-process-demo | 15 | 12771741 | <gh_stars>10-100
'''
Interpolate DFT to correct frequency/amplitude/phase
Take a cosine wave for example

XiaoCY 2021-02-23
'''

#%%
import numpy as np
import matplotlib.pyplot as plt

fs = 1000
N = 2000
t = np.arange(N)/fs

# Ground-truth amplitude, frequency (Hz) and phase (rad) of the test tone.
ax = 3.0
fx = 12.3
px = 0.5
x = ax*np.cos(2*np.pi*fx*t+px)

#%% uncorrect DFT
# Plain FFT peak picking: frequency is quantised to the bin grid, so both
# amplitude and phase are biased when fx falls between bins.
X = np.fft.fft(x,axis=0)
Nf = int(np.round(N/2.0+0.5))
f = np.arange(N)*fs/N
A = np.abs(X[:Nf])*2.0/N

k = np.argmax(A)
f1 = f[k]
a1 = A[k]
p1 = np.angle(X[k])

#%% ratio method: rectangular window
# For a rectangular window W(d) = sinc(d), the amplitude ratio of the two
# largest bins gives the fractional bin offset dk = A[k+1]/(A[k]+A[k+1])
# (mirrored when the peak lies below bin k).
if A[k-1] > A[k+1]:
    dk = -A[k-1]/(A[k-1]+A[k])
else:
    dk = A[k+1]/(A[k+1]+A[k])
f2 = (k+dk)*fs/N
a2 = a1/np.sinc(dk)
p2 = p1-dk*np.pi

#%% ratio method: hanning window
win = np.hanning(N)
X3 = np.fft.fft(x*win,axis=0)
A3 = np.abs(X3[:Nf])*2.0/np.sum(win)

k3 = np.argmax(A3)
a3 = A3[k3]
p3 = np.angle(X3[k3])

# Hann mainlobe: W(d) = sinc(d)/(1-d^2), so the neighbouring-bin ratio
# r = A[k+1]/A[k] = (1+d)/(2-d), hence d = (2r-1)/(r+1), i.e.
# dk = (2*A[k+1]-A[k])/(A[k+1]+A[k]) (sign-mirrored for a left-side peak).
if A3[k3-1] > A3[k3+1]:
    dk = (A3[k3]-2*A3[k3-1])/(A3[k3-1]+A3[k3])
else:
    # BUGFIX: was (2*A3[k3+1]-2*A3[k3])/(...), which does not follow from
    # the Hann mainlobe ratio and biased the estimate for right-side peaks.
    dk = (2.0*A3[k3+1]-A3[k3])/(A3[k3+1]+A3[k3])
f3 = (k3+dk)*fs/N
# Undo the Hann mainlobe attenuation W(dk) = sinc(dk)/(1-dk^2).
a3 = a3*(1-dk**2)/np.sinc(dk)
p3 = p3-dk*np.pi

#%% results
print('%-18s%-12s%-12s%-12s%-12s\n' % ('','FFT','Rectwin','Hanning','True Value'))
print('%-18s%-12f%-12f%-12f%-12f\n' % ('Frequency (Hz)',f1,f2,f3,fx))
print('%-18s%-12f%-12f%-12f%-12f\n' % ('Amplitude',a1,a2,a3,ax))
print('%-18s%-12f%-12f%-12f%-12f\n' % ('Phase (rad)',p1,p2,p3,px))

# Residuals of each reconstructed tone against the original signal.
xe1 = a1*np.cos(2*np.pi*f1*t+p1)-x
xe2 = a2*np.cos(2*np.pi*f2*t+p2)-x
xe3 = a3*np.cos(2*np.pi*f3*t+p3)-x

plt.figure()
plt.plot(t,xe1)
plt.plot(t,xe2)
plt.plot(t,xe3)
plt.grid()
plt.legend(('FFT','rect-correct','hann-corrcet'))
plt.xlabel('Time (s)')
plt.ylabel('Error')
plt.show()
imle_tf.py | MoustafaMeshry/StEP | 4 | 12771742 | <filename>imle_tf.py
"""
Implentation of the noise-to-latent mapping approach used in the
"Non-Adversarial Image Synthesis wit Generative Latent Nearest Neighbors" paper,
which can be found at https://arxiv.org/abs/1812.08985.
"""
import collections
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path as osp
from sklearn.neighbors import NearestNeighbors
import sys
import tensorflow as tf
import time
# Bundle of IMLE training hyper-parameters; see IMLE.train for field usage.
# NOTE(review): the typename string 'Hyperarams' is misspelled ('Hyperparams');
# left unchanged since it only affects repr/pickling, not attribute access.
Hyperparams = collections.namedtuple(
    'Hyperarams',
    'base_lr batch_size num_epochs decay_step decay_rate staleness num_samples_factor')
# Default every field to None so callers may pass any subset of keywords.
Hyperparams.__new__.__defaults__ = (None, None, None, None, None, None, None)
class MLPImplicitModel(tf.keras.Model):
  """MLP that maps a z_dim noise vector to a z_dim latent code.

  Architecture: `num_hidden_layers` Dense layers of width `hidden_dim`
  with activation `act`, followed by a final linear Dense back to z_dim.
  """
  def __init__(self,
               z_dim,
               num_hidden_layers=3,
               hidden_dim=128,
               act=tf.nn.tanh,  # tf.nn.leaky_relu is another option
               name='imle'):
    """Initializes the IMLE model.

    Args:
      z_dim: An integer specifying the latent space dimentionality.
      num_hidden_layers: An integer specifying the number of hidden MLP layers.
      hidden_dim: An integer specifying the hidden MLP layers dimentionality,
        this number is used only if 'num_hidden_layers' > 0.
      act: Activation function for hidden MLP layers.
      name: A string specifying a name for the model.
    """
    super(MLPImplicitModel, self).__init__(name=name)
    self.z_dim = z_dim
    self.num_hidden_layers = num_hidden_layers
    self.hidden_dim = hidden_dim
    self._blocks = []
    # Hidden MLP layers with non-linearities.
    for _ in range(num_hidden_layers):
      self._blocks.append(tf.keras.layers.Dense(
          hidden_dim, activation=act, use_bias=True)
      )
    # Final linear MLP
    self._blocks.append(tf.keras.layers.Dense(
        self.z_dim, activation=None, use_bias=True))

  def __call__(self, z):
    # Sequentially apply all Dense blocks to the input noise batch.
    for block_fn in self._blocks:
      z = block_fn(z)
    return z
class IMLE():
  """Trains a noise-to-latent mapper with Implicit Maximum Likelihood
  Estimation: repeatedly generate samples, match each real latent to its
  nearest generated sample, and regress the matched noise toward the data.
  """

  def __init__(self,
               z_dim,
               num_hidden_layers=3,
               hidden_dim=128,
               act=tf.nn.tanh):
    """Builds the IMLE model.

    Args:
      z_dim: An integer specifying the latent space dimentionality.
      num_hidden_layers: An integer specifying the number of hidden MLP layers.
      hidden_dim: An integer specifying the hidden MLP layers dimentionality,
        this number is used only if 'num_hidden_layers' > 0.
      act: Activation function for hidden MLP layers.
    """
    self.z_dim = z_dim
    self.model = MLPImplicitModel(z_dim=z_dim,
                                  num_hidden_layers=num_hidden_layers,
                                  hidden_dim=hidden_dim,
                                  act=act)

  def train(self, train_dir, data_np, hyperparams, data_dir, shuffle_data=True,
            save_checkpoint_secs=300, save_summaries_steps=1,
            noise_perturbation=0.01, log_steps=1):
    """Runs the IMLE training loop (TF1-style graph + session).

    Args:
      train_dir: Directory for TF summaries and the final checkpoint.
      data_np: Real latent codes, shape [N, ...]; flattened to [N, z_dim].
      hyperparams: A `Hyperparams` namedtuple (all fields required).
      data_dir: Directory where progress plots and sampled codes are saved.
      shuffle_data: Whether to shuffle `data_np` before truncating to a
        whole number of batches.
      save_checkpoint_secs: Unused; kept for interface compatibility.
      save_summaries_steps: Unused; kept for interface compatibility.
      noise_perturbation: Std of Gaussian noise added to matched z vectors
        each re-matching round (keeps targets from collapsing).
      log_steps: Write a TF summary every `log_steps` global steps.
    """
    global_step = tf.train.get_or_create_global_step()
    inc_global_step_op = tf.assign(global_step, global_step + 1)

    input_batch_ph = tf.compat.v1.placeholder(
        tf.float32, shape=[None, self.z_dim], name='input')
    gt_batch_ph = tf.compat.v1.placeholder(
        tf.float32, shape=[None, self.z_dim], name='ground_truth')
    output_samples = self.model(input_batch_ph)
    # Keep handles so sample() can reuse the same graph tensors.
    self.input_batch_ph = input_batch_ph
    self.output_samples = output_samples
    lr_ph = tf.compat.v1.placeholder(tf.float32, shape=[], name='lr')
    loss = tf.nn.l2_loss(output_samples - gt_batch_ph)
    tf.losses.add_loss(loss)
    tf.summary.scalar('l2_loss', loss, family='losses')
    summary_op = tf.summary.merge_all(name='summary_op')
    optimizer = tf.train.AdamOptimizer(
        learning_rate=lr_ph,
        beta1=0.5,
        beta2=0.999)
    train_op = tf.group(
        inc_global_step_op,
        optimizer.minimize(loss, var_list=tf.trainable_variables()))

    batch_size = hyperparams.batch_size
    num_batches = data_np.shape[0] // batch_size
    num_samples = num_batches * hyperparams.num_samples_factor

    # Truncate the dataset to a whole number of batches.
    if shuffle_data:
      data_ordering = np.random.permutation(data_np.shape[0])
      data_np = data_np[data_ordering]
    data_np = data_np[:batch_size*num_batches]

    # Reshape to [N, z_dim], where N is the size of the dataset.
    data_np = np.reshape(
        data_np, (data_np.shape[0], np.prod(data_np.shape[1:])))

    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      writer = tf.summary.FileWriter(train_dir, sess.graph)
      # Visualize the (untrained) sample distribution vs the data.
      rand_samples = self.sample(len(data_np) // 10, sess)
      plt.scatter(data_np[:, 0], data_np[:, 1], c='g')
      plt.scatter(rand_samples[:, 0], rand_samples[:, 1], c='r')
      plt.savefig(osp.join(data_dir, 'before_training.png'))
      np.save(osp.join(data_dir, 'rand_samples.npy'), rand_samples)
      plt.close()
      for epoch in range(hyperparams.num_epochs):
        # Step-decayed learning rate.
        if epoch % hyperparams.decay_step == 0:
          lr = hyperparams.base_lr * hyperparams.decay_rate ** (
              epoch // hyperparams.decay_step)
          print('lr = %.8f' % lr)
        # Periodically re-draw samples and re-match data to nearest samples.
        if epoch % hyperparams.staleness == 0:
          z_np = np.empty((num_samples * batch_size, self.z_dim))
          samples_np = np.empty((num_samples * batch_size, self.z_dim))
          for i in range(num_samples):
            z = np.random.normal(size=(batch_size, self.z_dim))
            samples = sess.run(self.output_samples, feed_dict={input_batch_ph: z})
            z_np[i*batch_size:(i+1)*batch_size] = z
            samples_np[i*batch_size:(i+1)*batch_size] = samples

          st = time.time()
          nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree', metric='euclidean').fit(samples_np)
          fit_time = time.time() - st
          print('kNN fitting took %.2f' % fit_time)
          st = time.time()
          _, nearest_indices = nbrs.kneighbors(data_np)
          query_time = time.time() - st
          print('kNN query took %.2f' % query_time)
          nearest_indices = np.array(nearest_indices)[:,0]
          print(nearest_indices.shape)
          print(len(nearest_indices), len(np.unique(nearest_indices)))
          # Each data point's regression input is the noise that generated
          # its nearest sample, slightly perturbed.
          z_np = z_np[nearest_indices,]
          z_np += noise_perturbation * np.random.randn(*z_np.shape)
          del samples_np

        epoch_err = 0.
        for i in range(num_batches):
          cur_z = z_np[i*batch_size:(i+1)*batch_size]
          cur_data = data_np[i*batch_size:(i+1)*batch_size]
          _, summary, loss_val, step = sess.run(
              [train_op, summary_op, loss, global_step], feed_dict={
                  lr_ph: lr, input_batch_ph: cur_z, gt_batch_ph: cur_data})
          epoch_err += loss_val
          # BUGFIX: was `log_summary_steps`, an undefined name (NameError);
          # the parameter is called `log_steps`.
          if step % log_steps == 0:
            writer.add_summary(summary, step)

        print("Epoch %d: Error: %f" % (epoch, epoch_err / num_batches))
        rand_samples = self.sample(len(data_np) // 10, sess)
        plt.scatter(data_np[:, 0], data_np[:, 1], c='g')
        plt.scatter(rand_samples[:, 0], rand_samples[:, 1], c='r')
        plt.savefig(osp.join(data_dir, 'epoch_%d.png' % epoch))
        np.save(osp.join(data_dir, 'rand_samples.npy'), rand_samples)
        plt.close()

      final_samples = self.sample(64, sess)
      np.save(osp.join(data_dir, 'rand_samples.npy'), final_samples)
      saver = tf.compat.v1.train.Saver()
      saver.save(sess, osp.join(train_dir, 'mapper'))

  def sample(self, num_samples, sess):
    """Draws `num_samples` Gaussian noise vectors and maps them through
    the model, returning an array of shape [num_samples, z_dim]."""
    z_np = np.random.normal(size=(num_samples, self.z_dim))
    samples_np = sess.run(self.output_samples, feed_dict={self.input_batch_ph: z_np})
    return samples_np
def main(*args):
  """Trains the IMLE noise-to-latent mapper on pre-exported style codes.

  Loads `zs_train.npy` from a hardcoded experiment directory, truncates
  the dataset to 50K records if larger, and trains an 8-dim IMLE mapper,
  writing plots/samples/checkpoints back to the same directory.
  """
  dataset_name = 'edges2shoes'
  # NOTE(review): absolute cluster path — this script is tied to one machine.
  data_dir = '/vulcan/scratch/mmeshry/appearance_pretraining/train/edges2shoes/custom/edges2shoes-custom_staged_v2-pretrain_handbags-r2-finetune/exported_styles'
  train_data = np.load(osp.join(data_dir, 'zs_train.npy'))
  if train_data.shape[0] > 50000:
    print('*** Warning: shuffling and truncating dataset to 50K records only!')
    idxs = np.random.permutation(train_data.shape[0])
    train_data = train_data[idxs[:50000], :]
  z_dim = 8
  imle = IMLE(z_dim,
              num_hidden_layers=3,
              hidden_dim=128,
              act=tf.nn.tanh)
  vars_all = tf.trainable_variables()

  # Hyperparameters:
  # ----------------
  # base_lr: Base learning rate
  # batch_size: Batch size
  # num_epochs: Number of epochs
  # decay_step: Number of epochs before learning rate decay
  # decay_rate: Rate of learning rate decay
  # staleness: Number of times to re-use nearest samples
  # num_samples_factor: Ratio of the number of generated samples to the number of real data examples

  if not osp.exists(data_dir):
    os.makedirs(data_dir)
  imle.train(data_dir, train_data, Hyperparams(
      base_lr=1e-2,
      batch_size=64,
      num_epochs=500,
      decay_step=50,
      decay_rate=0.7,
      staleness=5,
      num_samples_factor=10,
    ),
    data_dir)
| 2.765625 | 3 |
6. sorting_algorithms/heap_sort.py | sourcery-ai-bot/udacity-datastructures-algorithms | 3 | 12771743 | from typing import List
def heapify(arr: List[int], n: int, i: int) -> None:
    """Sift arr[i] down so the subtree rooted at i (within the first n
    elements) satisfies the max-heap property.

    Assumes both child subtrees of i are already valid max-heaps.
    """
    largest_index = i
    left_node = 2 * i + 1
    right_node = 2 * i + 2

    # compare with left child
    if left_node < n and arr[i] < arr[left_node]:
        largest_index = left_node

    # compare with right child
    if right_node < n and arr[largest_index] < arr[right_node]:
        largest_index = right_node

    # if either of left / right child is the largest node, swap and keep
    # sifting the displaced value down.
    if largest_index != i:
        arr[i], arr[largest_index] = arr[largest_index], arr[i]
        heapify(arr, n, largest_index)


def heapsort(arr: List[int]) -> None:
    """Sort `arr` in place (ascending) using heapsort.

    Builds a max-heap, then repeatedly swaps the root (maximum) to the
    end of the unsorted region and restores the heap on the remainder.
    O(n log n) time, O(log n) recursion depth, in place.
    """
    n = len(arr)

    # Build a maxheap. Nodes at n//2 .. n-1 are leaves and already heaps,
    # so start sifting from the last internal node (was range(n, -1, -1),
    # which wasted calls on indices with no children).
    for i in range(n // 2 - 1, -1, -1):
        heapify(arr, n, i)

    # One by one extract elements
    for i in range(n - 1, 0, -1):
        arr[i], arr[0] = arr[0], arr[i]  # move current max into place
        heapify(arr, i, 0)
# Module-level smoke tests: each list is sorted in place, then verified.
test0 = [3, 7, 4, 6, 1, 0, 9, 8, 9, 4, 3, 5]
test1 = [5, 5, 5, 3, 3, 3, 4, 4, 4, 4]
test2 = [0, 1, 2, 5, 12, 21, 0]
for _sample in (test0, test1, test2):
    heapsort(_sample)
    assert _sample == sorted(_sample)
| 4 | 4 |
artworks/admin.py | mnosinov/artworks_project | 0 | 12771744 | from django.contrib import admin
from .models import Author, Genre, Artwork, Painting, Book, Media
class AuthorAdmin(admin.ModelAdmin):
    """Admin list page for authors: linkable id/name columns, name search."""
    list_display = ('id', 'name', )
    list_display_links = ('id', 'name', )
    search_fields = ('name', )


class GenreAdmin(admin.ModelAdmin):
    """Admin list page for genres, filterable by artwork type."""
    list_display = ('id', 'title', 'artwork_type')
    list_display_links = ('id', 'title', )
    list_filter = ('artwork_type',)
    search_fields = ('title', )


class ArtworkAdmin(admin.ModelAdmin):
    """Admin list page for artworks with core metadata columns."""
    list_display = ('id', 'title', 'type', 'genre', 'author', 'pub_year', 'price')
    list_display_links = ('id', 'title', )
    search_fields = ('title', )


class PaintingAdmin(admin.ModelAdmin):
    """Admin list page for painting-specific details of an artwork."""
    list_display = ('artwork', 'height', 'width', 'paint')
    list_display_links = ('artwork',)


class BookAdmin(admin.ModelAdmin):
    """Admin list page for book-specific details of an artwork."""
    list_display = ('artwork', 'pages', 'cover')
    list_display_links = ('artwork',)


class MediaAdmin(admin.ModelAdmin):
    """Admin list page for media-specific details of an artwork."""
    list_display = ('artwork', 'media_type', 'duration')
    list_display_links = ('artwork',)


# Register every model with its customized admin class.
admin.site.register(Author, AuthorAdmin)
admin.site.register(Genre, GenreAdmin)
admin.site.register(Artwork, ArtworkAdmin)
admin.site.register(Painting, PaintingAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(Media, MediaAdmin)
| 1.960938 | 2 |
security/logging/requests/utils.py | druids/django-security | 9 | 12771745 | import re
import json
from urllib.parse import parse_qs, urlparse
from json import JSONDecodeError
from django.template.defaultfilters import truncatechars
from django.utils.encoding import force_text
from security.config import settings
from security.utils import remove_nul_from_string
def is_base_collection(v):
    """Return True when *v* is a plain non-mapping collection (list, tuple or set)."""
    collection_types = (list, tuple, set)
    return isinstance(v, collection_types)
def get_headers(request):
    """Extract HTTP headers from ``request.META``, dropping the 'HTTP_' prefix."""
    prefix = 'HTTP_'
    headers = {}
    for meta_key, meta_value in request.META.items():
        if meta_key.startswith(prefix):
            headers[meta_key[len(prefix):]] = meta_value
    return headers
def regex_sub_groups_global(pattern, repl, string):
    """
    Globally replace all groups inside pattern with `repl`.
    If `pattern` doesn't have groups the whole match is replaced.
    """
    # Process matches back-to-front so earlier spans stay valid after edits.
    matches = list(re.finditer(pattern, string))
    for match in reversed(matches):
        group_count = len(match.groups())
        # With no capture groups, fall through to group 0 (the whole match).
        lowest = 0 if group_count else -1
        for group_index in range(group_count, lowest, -1):
            begin, end = match.span(group_index)
            string = string[:begin] + repl + string[end:]
    return string
def flat_params(params):
    """Unwrap single-element list/tuple values to their sole item,
    leaving every other value untouched."""
    flattened = {}
    for key, value in params.items():
        if is_base_collection(value) and len(value) == 1:
            value = value[0]
        flattened[key] = value
    return flattened
def list_params(params):
    """Normalize every value into a list: collections are converted with
    list(), scalars are wrapped in a one-element list."""
    normalized = {}
    for key, value in params.items():
        normalized[key] = list(value) if is_base_collection(value) else [value]
    return normalized
def get_logged_params(url):
    """Parse the query string of *url* and flatten single-valued params."""
    query = urlparse(url).query
    return flat_params(parse_qs(query))
def hide_sensitive_data_body(content):
    """Mask configured sensitive patterns in a request/response body.

    No-op when sensitive-data hiding is disabled in settings.
    """
    if not settings.HIDE_SENSITIVE_DATA:
        return content
    for pattern in settings.HIDE_SENSITIVE_DATA_PATTERNS.get('BODY', ()):
        content = regex_sub_groups_global(pattern, settings.SENSITIVE_DATA_REPLACEMENT, content)
    return content
def hide_sensitive_data_headers(headers):
    """Return a copy of *headers* with values of sensitive header names
    replaced; returns the original mapping when hiding is disabled."""
    if not settings.HIDE_SENSITIVE_DATA:
        return headers
    masked = dict(headers)
    for pattern in settings.HIDE_SENSITIVE_DATA_PATTERNS.get('HEADERS', ()):
        for header_name in masked:
            if re.match(pattern, header_name, re.IGNORECASE):
                masked[header_name] = settings.SENSITIVE_DATA_REPLACEMENT
    return masked
def hide_sensitive_data_queries(queries):
    """Return a copy of *queries* with sensitive query values replaced.

    Multi-valued (list/tuple/set) entries are replaced element-wise so the
    logged value count is preserved; returns the original mapping when
    hiding is disabled.
    """
    if not settings.HIDE_SENSITIVE_DATA:
        return queries
    masked = dict(queries)
    for pattern in settings.HIDE_SENSITIVE_DATA_PATTERNS.get('QUERIES', ()):
        for query_name, query_value in masked.items():
            if re.match(pattern, query_name, re.IGNORECASE):
                if is_base_collection(query_value):
                    masked[query_name] = len(query_value) * [settings.SENSITIVE_DATA_REPLACEMENT]
                else:
                    masked[query_name] = settings.SENSITIVE_DATA_REPLACEMENT
    return masked
def truncate_json_data(data):
    """Recursively truncate every string in a JSON-like structure to
    settings.LOG_JSON_STRING_LENGTH; non-container, non-string values
    pass through unchanged."""
    if isinstance(data, str):
        return truncatechars(data, settings.LOG_JSON_STRING_LENGTH)
    if isinstance(data, dict):
        return {key: truncate_json_data(value) for key, value in data.items()}
    if isinstance(data, list):
        return [truncate_json_data(item) for item in data]
    return data
def truncate_body(content, max_length):
    """Shorten an over-long body.

    JSON dict/list bodies are truncated string-by-string (keeping the JSON
    valid) when LOG_JSON_STRING_LENGTH is configured; anything else is cut
    to max_length + 1 characters (the extra char lets a later truncatechars
    call add its ellipsis marker).
    """
    content = force_text(content, errors='replace')
    if len(content) <= max_length:
        return content
    try:
        parsed = json.loads(content)
    except JSONDecodeError:
        return content[:max_length + 1]
    if isinstance(parsed, (dict, list)) and settings.LOG_JSON_STRING_LENGTH is not None:
        return json.dumps(truncate_json_data(parsed))
    return content[:max_length + 1]
def clean_body(body, max_length):
    """Prepare a body for logging: truncate, strip NUL bytes and mask
    sensitive data.  Returns None unchanged.

    Note the two different guards: the first truncation applies whenever
    max_length is not None, while the final trim is skipped for a falsy
    (None or 0) max_length.
    """
    if body is None:
        return body
    body = force_text(body, errors='replace')
    if max_length is not None:
        # Allow room for the masking placeholder before the final trim.
        cleaned = truncatechars(
            truncate_body(body, max_length),
            max_length + len(settings.SENSITIVE_DATA_REPLACEMENT),
        )
    else:
        cleaned = str(body)
    if cleaned:
        cleaned = hide_sensitive_data_body(remove_nul_from_string(cleaned))
    if max_length:
        cleaned = truncatechars(cleaned, max_length)
    return cleaned
def clean_json(data):
    """Strip NUL characters from every key and every string value of *data*."""
    cleaned = {}
    for key, value in data.items():
        if isinstance(value, str):
            value = remove_nul_from_string(value)
        cleaned[remove_nul_from_string(key)] = value
    return cleaned
def clean_headers(headers):
    """NUL-strip and sensitive-mask headers; falsy input is returned as-is."""
    if not headers:
        return headers
    return hide_sensitive_data_headers(clean_json(headers))
def clean_queries(queries):
    """NUL-strip and sensitive-mask query params; falsy input is returned as-is."""
    if not queries:
        return queries
    return hide_sensitive_data_queries(clean_json(queries))
def log_input_request_with_data(request, related_objects=None, slug=None, extra_data=None):
    """Attach extra context to the request's input-request logger.

    Returns False when the request carries no (truthy) logger, True after
    the provided related objects, slug and extra data have been recorded.
    """
    logger = getattr(request, 'input_request_logger', None)
    if not logger:
        return False

    if related_objects:
        logger.add_related_objects(*related_objects)
    if slug:
        logger.set_slug(slug)
    if extra_data:
        logger.update_extra_data(extra_data)
    return True
| 2.296875 | 2 |
SimG4CMS/HGCalTestBeam/python/hgcalTBMBAnalyzerCERN_cfi.py | PKUfudawei/cmssw | 2 | 12771746 | import FWCore.ParameterSet.Config as cms
from SimG4CMS.HGCalTestBeam.hgcalTBMBAnalyzer_cfi import *
# CERN test-beam variant: a straight clone of the generic HGCal TB
# material-budget analyzer (no parameter overrides).
hgcalTBMBAnalyzerCERN = hgcalTBMBAnalyzer.clone()
| 1.109375 | 1 |
examples/pbc/22-k_points_mp2_stagger.py | xinxing02/pyscf | 0 | 12771747 | #!/usr/bin/env python
'''
Example code for
k-point spin-restricted periodic MP2 calculation using the staggered mesh method
Author: <NAME> (<EMAIL>)
Reference: Staggered Mesh Method for Correlation Energy Calculations of Solids: Second-Order
Møller–Plesset Perturbation Theory, J. Chem. Theory Comput. 2021, 17, 8, 4733-4745
'''
from pyscf.pbc.mp.kmp2_stagger import KMP2_stagger
from pyscf.pbc import df, gto, scf, mp
'''
Hydrogen dimer
'''
cell = gto.Cell()
cell.pseudo = 'gth-pade'
cell.basis = 'gth-szv'
cell.ke_cutoff=100
cell.atom='''
H 3.00 3.00 2.10
H 3.00 3.00 3.90
'''
cell.a = '''
6.0 0.0 0.0
0.0 6.0 0.0
0.0 0.0 6.0
'''
cell.unit = 'B'
cell.verbose = 4
cell.build()
# HF calculation using FFTDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0160902544091997))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0140289970302513))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.0143904878990777))<1e-5)
# HF calculation using GDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
gdf = df.GDF(cell, kpts).build()
kmf.with_df = gdf
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0158364523431071))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.0140280303691396))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.0141829343769316))<1e-5)
'''
Diamond system
'''
cell = gto.Cell()
cell.pseudo = 'gth-pade'
cell.basis = 'gth-szv'
cell.ke_cutoff=100
cell.atom='''
C 0. 0. 0.
C 1.26349729, 0.7294805 , 0.51582061
'''
cell.a = '''
2.52699457, 0. , 0.
1.26349729, 2.18844149, 0.
1.26349729, 0.7294805 , 2.06328243
'''
cell.unit = 'angstrom'
cell.verbose = 4
cell.build()
# HF calculation using FFTDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.156289981810986))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.105454107635884))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.095517731535516))<1e-5)
# HF calculation using GDF
nks_mf = [2,2,2]
kpts = cell.make_kpts(nks_mf, with_gamma_point=True)
kmf = scf.KRHF(cell, kpts, exxdiv='ewald')
gdf = df.GDF(cell, kpts).build()
kmf.with_df = gdf
ehf = kmf.kernel()
# staggered mesh KMP2 calculation using two submeshes of size [1,1,1] in kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=True)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.154923152683604))<1e-5)
# staggered mesh KMP2 calculation using two meshes of size [2,2,2], one of them is kmf.kpts
kmp = KMP2_stagger(kmf, flag_submesh=False)
emp2 = kmp.kernel()
assert((abs(emp2 - -0.105421948003715))<1e-5)
# standard KMP2 calculation
kmp = mp.KMP2(kmf)
emp2, _ = kmp.kernel()
assert((abs(emp2 - -0.0952009565805345))<1e-5)
| 2.328125 | 2 |
defender/middleware.py | Korred/django-defender | 1 | 12771748 | try:
from django.utils.deprecation import MiddlewareMixin as MIDDLEWARE_BASE_CLASS
except ImportError:
MIDDLEWARE_BASE_CLASS = object
from django.contrib.auth import views as auth_views
from django.utils.decorators import method_decorator
from .decorators import watch_login
class FailedLoginMiddleware(MIDDLEWARE_BASE_CLASS):
    """ Failed login middleware.

    On first instantiation, monkey-patches Django's auth login view so
    every login attempt passes through the `watch_login` decorator, which
    records failed attempts for lockout tracking.
    """
    # Class-level flag guarding the monkey-patch so the view is wrapped
    # exactly once per process, no matter how many middleware instances
    # are created.
    patched = False

    def __init__(self, *args, **kwargs):
        super(FailedLoginMiddleware, self).__init__(*args, **kwargs)
        # Watch the auth login.
        # Monkey-patch only once - otherwise we would be recording
        # failed attempts multiple times!
        if not FailedLoginMiddleware.patched:
            # Django 1.11 turned the `login` function view into the
            # `LoginView` class-based view
            try:
                from django.contrib.auth.views import LoginView

                our_decorator = watch_login()
                watch_login_method = method_decorator(our_decorator)
                # Wrap dispatch() so GET and POST both pass through the watcher.
                LoginView.dispatch = watch_login_method(LoginView.dispatch)

            except ImportError:  # Django < 1.11
                auth_views.login = watch_login()(auth_views.login)

            FailedLoginMiddleware.patched = True
| 2.15625 | 2 |
mmf/datasets/processors/video_processors.py | dk25021999/mmf | 1,928 | 12771749 | # Copyright (c) Facebook, Inc. and its affiliates.
# TODO: Once internal torchvision transforms become stable either in torchvision
# or in pytorchvideo, move to use those transforms.
import random
import mmf.datasets.processors.functional as F
import torch
from mmf.common.registry import registry
from mmf.datasets.processors import BaseProcessor
@registry.register_processor("video_random_crop")
class VideoRandomCrop(BaseProcessor):
    """Randomly crop a video clip to a fixed spatial size."""

    def __init__(self, *args, size=None, **kwargs):
        super().__init__()
        if size is None:
            raise TypeError("Parameter 'size' is required")
        self.size = size

    @staticmethod
    def get_params(vid, output_size):
        """Get parameters for ``crop`` for a random crop.
        """
        height, width = vid.shape[-2:]
        target_h, target_w = output_size
        # Full-frame crop: nothing to randomize.
        if (height, width) == (target_h, target_w):
            return 0, 0, height, width
        # Draw row offset first, then column offset (keeps RNG call order).
        top = random.randint(0, height - target_h)
        left = random.randint(0, width - target_w)
        return top, left, target_h, target_w

    def __call__(self, vid):
        top, left, target_h, target_w = self.get_params(vid, self.size)
        return F.video_crop(vid, top, left, target_h, target_w)
@registry.register_processor("video_center_crop")
class VideoCenterCrop(BaseProcessor):
    """Crop the spatial center of a video clip to a fixed size."""

    def __init__(self, *args, size=None, **kwargs):
        super().__init__()
        if size is None:
            raise TypeError("Parameter 'size' is required")
        self.size = size

    def __call__(self, vid):
        return F.video_center_crop(vid, self.size)
@registry.register_processor("video_resize")
class VideoResize(BaseProcessor):
    """Resize a video clip to the given spatial size."""

    def __init__(self, *args, size=None, **kwargs):
        # Consistency fix: every sibling processor initializes the
        # BaseProcessor; this one was missing the super().__init__() call.
        super().__init__()
        if size is None:
            raise TypeError("Parameter 'size' is required")
        self.size = size

    def __call__(self, vid):
        return F.video_resize(vid, self.size)
@registry.register_processor("video_to_tensor")
class VideoToTensor(BaseProcessor):
    """Convert a video clip to a normalized float tensor."""

    def __init__(self, *args, **kwargs):
        # Stateless processor: nothing to configure beyond base init.
        # (Removed a redundant trailing `pass` statement.)
        super().__init__()

    def __call__(self, vid):
        return F.video_to_normalized_float_tensor(vid)
@registry.register_processor("video_normalize")
class VideoNormalize(BaseProcessor):
    """Channel-wise normalization of a video clip with given mean/std."""

    def __init__(self, mean=None, std=None, **kwargs):
        super().__init__()
        # NOTE(review): this only rejects the case where BOTH are missing;
        # supplying exactly one of mean/std passes validation — confirm
        # whether that is intended.
        if mean is None and std is None:
            raise TypeError("'mean' and 'std' params are required")
        self.mean = mean
        self.std = std

    def __call__(self, vid):
        return F.video_normalize(vid, self.mean, self.std)
@registry.register_processor("video_random_horizontal_flip")
class VideoRandomHorizontalFlip(BaseProcessor):
    """Horizontally flip a video clip with probability ``p``."""

    def __init__(self, p=0.5, **kwargs):
        super().__init__()
        self.p = p

    def __call__(self, vid):
        # One uniform draw decides whether this clip is mirrored.
        return F.video_hflip(vid) if random.random() < self.p else vid
@registry.register_processor("video_pad")
class Pad(BaseProcessor):
    """Pad the spatial borders of a video clip with a constant fill value."""

    def __init__(self, padding=None, fill=0, **kwargs):
        super().__init__()
        if padding is None:
            raise TypeError("Parameter 'padding' is required")
        self.padding = padding
        self.fill = fill

    def __call__(self, vid):
        return F.video_pad(vid, self.padding, self.fill)
@registry.register_processor("truncate_or_pad")
class TruncateOrPad(BaseProcessor):
    # truncate or add 0 until the desired output size
    """Force the first row of a (1, L) sample to exactly `output_size`
    elements: truncate when too long, zero-pad when too short.

    Both branches return a 1-D tensor of length `output_size`.
    """

    def __init__(self, output_size=None, **kwargs):
        super().__init__()
        if output_size is None:
            raise TypeError("Parameter 'output_size' is required")
        # NOTE(review): a tuple output_size passes this assert but breaks the
        # arithmetic/slicing in __call__; presumably only int is supported.
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        if sample.shape[1] >= self.output_size:
            return sample[0, : self.output_size]
        # BUGFIX: previously concatenated the 1-D slice sample[0, :] with a
        # 2-D torch.zeros(1, ...) along dim 1, which raises at runtime and is
        # inconsistent with the 1-D truncation branch. Pad with a 1-D zero
        # tensor instead.
        padding = torch.zeros(self.output_size - sample.shape[1])
        return torch.cat((sample[0, :], padding))
| 2.25 | 2 |
util/langUtil.py | herougan/TradeHunter | 0 | 12771750 | import math
import os
from datetime import timedelta, datetime
import unicodedata
from typing import List
import re
import pandas as pd
from dateutil import parser
# timedelta/datetime
def strtotimedelta(s: str):
    """XM X minutes, XH X hours, Xd X days, Xw X weeks, Xm X months, all separated by a space"""
    total = timedelta()
    for token in s.split():
        amount, unit = drsplit(token)
        folded = unit.casefold()
        if amount is None:
            # No usable number for this token; skip it.
            continue
        if unit.lower() == "m" or "minute" in folded:
            total += timedelta(minutes=amount)
        elif unit.lower() == "h" or "hour" in folded:
            total += timedelta(hours=amount)
        elif unit.lower() == "d" or "day" in folded:
            total += timedelta(days=amount)
        elif unit.lower() == "wk" or "week" in folded:
            total += timedelta(weeks=amount)
        elif unit.lower() == "mo" or "month" in folded:
            # A "month" is approximated as 4 weeks.
            total += timedelta(weeks=amount * 4)
        elif unit.lower() == "y" or "year" in folded:
            # A "year" is approximated as 48 weeks (12 x 4-week months).
            total += timedelta(weeks=amount * 48)
        elif unit.lower() == "s" or "second" in folded:
            total += timedelta(seconds=amount)
        elif s.lower() == "max":
            # Special sentinel: pass the literal 'max' period straight through.
            return s
    return total
def get_yahoo_intervals():
    """Return the candle intervals accepted by the Yahoo Finance API."""
    return [
        '1M', '2M', '5M', '15M', '30M', '60M', '1h',
        '90M', '1d', '5d', '1wk', '1mo', '3mo',
    ]
def strtoyahootimestr(s: str):
    """XM X minutes, XH X hours, Xd X days, Xw X weeks, Xm X months, all separated by a space
    Interval closest to '1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo' will be chosen."""
    interval = get_yahoo_intervals()
    # Binary search over the duration-sorted interval list for the entry
    # closest to the requested duration.
    idx, prev_idx = len(interval) // 2, 0
    left, right = 0, len(interval)
    chosen_interval = strtotimedelta(s)
    while not (right - left) < 2:
        prev_idx = idx
        diff = strtotimedelta(interval[idx]) - chosen_interval
        # Check if chosen interval is smaller or greater than measured interval, move boundaries accordingly
        if diff > timedelta(0):
            right = idx
            idx = (idx + left) // 2
        elif diff == timedelta(0):
            # Exact match — no tie-break needed.
            return interval[idx]
        else:
            left = idx
            idx = (idx + right) // 2
    # Compare which is better
    # The search narrowed down to two candidates (idx and prev_idx); keep
    # whichever duration is nearer to the request.
    diff1 = strtotimedelta(interval[idx]) - chosen_interval
    diff2 = strtotimedelta(interval[prev_idx]) - chosen_interval
    if diff1 > timedelta(0):
        # interval_1 is larger than chosen interval, so interval_2 is smaller (diff2 is negative)
        if diff1 + diff2 > timedelta(0):
            # diff1 is larger than diff2, and so chosen interval is closer to interval_2
            idx = prev_idx
    else:
        # interval_1 is smaller than chosen interval, so interval_2 is bigger (diff2 is positive, diff1 is negative)
        if diff1 + diff2 < timedelta(0):
            # diff2 (positive) is not large enough to compensate for diff1 and so chosen interval is closer to interval_2
            idx = prev_idx
    return interval[idx]
def checkifyahootimestr(s: str):
    """Return True when `s` looks like a Yahoo time string such as '15M' or '1wk'."""
    # Digits followed by letters, nothing else.
    return bool(re.match(r"^\d+[a-zA-Z]+$", s))
def timedeltatoyahootimestr(_interval: timedelta):
    """XM X minutes, XH X hours, Xd X days, Xw X weeks, Xm X months, all separated by a space
    Interval closest to '1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo' will be chosen."""
    # NOTE(review): near-duplicate of strtoyahootimestr, but with a local,
    # lowercase interval list instead of get_yahoo_intervals() — verify the
    # two are meant to stay in sync.
    interval = ['1m', '2m', '5m', '15m', '30m', '60m', '1h', '90m', '1d', '5d', '1wk', '1mo', '3mo']
    # Binary search over the duration-sorted list for the closest entry.
    idx, prev_idx = len(interval) // 2, 0
    left, right = 0, len(interval)
    chosen_interval = _interval
    while not (right - left) < 2:
        prev_idx = idx
        diff = strtotimedelta(interval[idx]) - chosen_interval
        # Check if chosen interval is smaller or greater than measured interval, move boundaries accordingly
        if diff > timedelta(0):
            right = idx
            idx = (idx + left) // 2
        elif diff == timedelta(0):
            # Exact match — no tie-break needed.
            return interval[idx]
        else:
            left = idx
            idx = (idx + right) // 2
    # Compare which is better
    # The search narrowed down to two candidates (idx and prev_idx); keep
    # whichever duration is nearer to the request.
    diff1 = strtotimedelta(interval[idx]) - chosen_interval
    diff2 = strtotimedelta(interval[prev_idx]) - chosen_interval
    if diff1 > timedelta(0):
        # interval_1 is larger than chosen interval, so interval_2 is smaller (diff2 is negative)
        if diff1 + diff2 > timedelta(0):
            # diff1 is larger than diff2, and so chosen interval is closer to interval_2
            idx = prev_idx
    else:
        # interval_1 is smaller than chosen interval, so interval_2 is bigger (diff2 is positive, diff1 is negative)
        if diff1 + diff2 < timedelta(0):
            # diff2 (positive) is not large enough to compensate for diff1 and so chosen interval is closer to interval_2
            idx = prev_idx
    return interval[idx]
def timedeltatosigstr(s: timedelta):
    """Takes in datetime and returns string containing only one significant time denomination without spaces"""
    if s.days > 0:
        return F'{s.days}d'
    elif s.seconds >= 60 * 60:
        return F'{s.seconds // (60 * 60)}h'
    elif s.seconds >= 60:
        # Bug fix: was `> 60`, which rendered exactly one minute as '60s'
        # (inconsistent with the `>=` used by the hours branch above).
        return F'{s.seconds // 60}M'
    else:
        return F"{s.seconds}s"
def yahoolimitperiod(period: timedelta, interval: str):
    """Divides period into smaller chunks depending on the interval. Outputs new_period, n_loop"""
    n_loop = 1
    loop_period = period  # NOTE(review): assigned but never read afterwards
    # Maximum look-back allowed per request for each minute-level interval
    # ('M' keys parse as minutes via strtotimedelta).
    min_dict = {
        '1M': '7d',
        '2M': '7d',
        '5M': '7d',
        '15M': '60d',
        '30M': '60d',
        '60M': '60d',
        '90M': '60d',
        '1h': '60d',
    }
    # Pick the largest table interval that is still strictly shorter than
    # the requested one; that row decides the per-request period cap.
    eff_interval = '1M'
    diff = strtotimedelta(eff_interval) - strtotimedelta(interval)
    for key in min_dict.keys():
        _diff = strtotimedelta(key) - strtotimedelta(interval)
        if timedelta() > _diff > diff:
            diff = _diff
            eff_interval = key
    max_period = strtotimedelta(min_dict[eff_interval])
    if period > max_period:
        # Split into n_loop equal chunks so each request fits under the cap.
        n_loop = math.ceil(period / max_period)
        eff_period = period / n_loop
        return eff_period, n_loop
    return period, 1
def yahoolimitperiod_leftover(period: timedelta, interval: str):
    """Divides period into smaller defined chunks, depending on the interval.
    Outputs new_period, n_loop and period_leftover"""
    # Maximum look-back allowed per request for each interval.
    # NOTE(review): keys here are lowercase ('1m') while yahoolimitperiod
    # uses uppercase ('1M'); strtotimedelta treats both as minutes, so the
    # behavior matches, but the tables themselves also disagree on caps.
    min_dict = {
        '1m': '7d',
        '2m': '7d',
        '5m': '7d',
        '15m': '60d',
        '30m': '100d',
        '60m': '365d',
        '1h': '365d',
        '90m': '365d',
        '1d': '1000d',
    }
    # Pick the largest table interval still strictly shorter than requested.
    eff_interval = '1m'
    diff = strtotimedelta(eff_interval) - strtotimedelta(interval)
    for key in min_dict.keys():
        _diff = strtotimedelta(key) - strtotimedelta(interval)
        if timedelta() > _diff > diff:
            diff = _diff
            eff_interval = key
    max_period = strtotimedelta(min_dict[eff_interval])
    if period > max_period:
        # n_loop full-size chunks plus a final leftover chunk, clamped up to
        # at least one interval so the last request is never sub-interval sized.
        n_loop = math.floor(period / max_period)
        leftover = period - max_period * n_loop
        if leftover < strtotimedelta(interval):
            leftover = strtotimedelta(interval)
        return max_period, n_loop, leftover
    return period, 1, timedelta(0)
def strtodatetime(s: str) -> datetime:
    """Parse a date/time string via dateutil's flexible parser.

    Accepts e.g. '2022-02-23' or '2022-02-23 09:30:00-05:00'
    (hh:mm:ss plus timezone offset).
    """
    return parser.parse(s)
def check_if_valid_timestr(s: str):
    """True when `s` starts with a digit and does not end with one (e.g. '15M')."""
    starts_numeric = is_lnumber(s)
    ends_alpha = is_not_rnumber(s)
    return starts_numeric and ends_alpha
# String manipulation
def drsplit(s: str):
    """Split `s` into its leading integer run and the trailing remainder.

    '15M' -> (15, 'M'); 'abc' -> (0, 'abc'); '7' -> (7, '').
    """
    alpha = s.lstrip('0123456789')
    # try_int already maps an empty/invalid digit prefix to 0, so the
    # original's extra `if not digit: digit = 0` re-check was dead code.
    digit = try_int(s[:len(s) - len(alpha)])
    return digit, alpha
def is_lnumber(s: str):
    """True when `s` begins with an ASCII digit."""
    return len(s) > 0 and s[0] in '0123456789'
def is_not_rnumber(s: str):
    """True when `s` does not end with an ASCII digit (empty strings qualify)."""
    return len(s) == 0 or s[-1] not in '0123456789'
def is_datetime(v) -> bool:
    """Return True if `v` is a datetime instance (subclasses included)."""
    return isinstance(v, datetime)
def is_datetimestring(s) -> bool:
    """Stub: should report whether `s` parses as a datetime; always False for now."""
    return False  # todo
# Names/File Names
def normify_name(s: str):
    """Strip every space character from `s`."""
    return ''.join(s.split(' '))
def snake_to_proper_case(s: str):
    """to_proper_case -> To Proper Case"""
    # Bug fix: the original applied .upper() to each word, producing
    # 'TO PROPER CASE' instead of the documented 'To Proper Case'.
    return ' '.join(word.capitalize() for word in s.split('_'))
def remove_special_char(s: str):
    """Drop every underscore from `s`."""
    return ''.join(ch for ch in s if ch != '_')
def to_camel_case(s: str):
    """Stub: intended to convert `s` to camelCase; currently returns `s` unchanged."""
    return s
# Try
def try_int(s: str) -> int:
    """Coerce `s` to int, returning 0 for None or anything unparseable."""
    if s is None:
        return 0
    try:
        return int(s)
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects (e.g. lists), which the
        # original ValueError-only handler let propagate.
        return 0
def try_float(s: str) -> float:
    """Coerce `s` to float, returning 0 for None or anything unparseable."""
    if s is None:
        return 0
    try:
        return float(s)
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects (e.g. lists), which the
        # original ValueError-only handler let propagate.
        return 0
def try_key(dict: {}, key: str):
    """Return dict[key] if present, else the placeholder '-'."""
    if key in dict:
        # Bug fix: the original returned dict['key'] (the literal string
        # 'key'), which raised KeyError for any other present key.
        return dict[key]
    else:
        return "-"
def try_divide(n1, n2):
    """Divide n1 by n2, mapping division by zero to math.inf."""
    return math.inf if n2 == 0 else n1 / n2
def try_max(list):
    """max() that returns 0 for an empty sequence instead of raising."""
    return max(list) if len(list) >= 1 else 0
def try_min(list):
    """min() that returns 0 for an empty sequence instead of raising."""
    return min(list) if len(list) >= 1 else 0
def try_mean(list):
    """Mean of the non-None entries of `list`; 0 for an empty list."""
    if len(list) < 1:
        return 0
    t, l = 0, len(list)
    for i in list:
        if i is None:
            # Bug fix: the original decremented by 0 (`l -= 0`), so None
            # entries still inflated the divisor and dragged the mean down.
            l -= 1
            continue
        t += i
    if l == 0:
        # Every entry was None — treat it like an empty list.
        return 0
    return try_divide(t, l)
def try_width(list):
    """Spread (max - min) of the non-None entries; 0 when there are none."""
    values = [v for v in list if v is not None]
    if len(values) < 1:
        return 0
    # max is seeded at 0 and min at +inf, matching the original behavior
    # (all-negative data therefore measures its spread from 0).
    hi, lo = 0, math.inf
    for v in values:
        if v > hi:
            hi = v
        if v < lo:
            lo = v
    if lo == math.inf:
        return math.inf
    return hi - lo
def try_sgn(n1):
    """Sign of n1 as +/-1.0; 0 when n1 is zero or unparseable."""
    value = try_float(n1)
    if not value:
        return 0
    return math.copysign(1, value)
def in_range(n1, n2=(0, 1)):
    """True when try_float(n1) lies strictly between n2[0] and n2[1]."""
    value = try_float(n1)
    # Bug fix: the original required `value` to be truthy, so a legitimate
    # 0 was never reported in range even when n2 straddles zero. Note that
    # unparseable inputs still coerce to 0 via try_float.
    # (Default also changed from a mutable [0, 1] to an immutable tuple.)
    if len(n2) >= 2 and n2[0] < value < n2[1]:
        return True
    return False
def in_std_range(n1, avg, stdev, order=1):
    """True when n1 falls within `order` standard deviations of `avg`."""
    spread = order * stdev
    return in_range(n1, [avg - spread, avg + spread])
# XVar
def pip_conversion(currency_pair: str):
    """Pip size for a pair: 0.01 when it contains both USD and JPY, else 0.0001."""
    usd_jpy_pair = 'USD' in currency_pair and 'JPY' in currency_pair
    return 1 / 100 if usd_jpy_pair else 1 / 10000
def leverage_to_float(lev: str):
    """Input: 'int1:int2. Output: int2/int1'"""
    parts = lev.split(':')
    if len(parts) != 2:
        # Malformed leverage string.
        return 0
    denominator = try_int(parts[0])
    numerator = try_int(parts[1])
    if denominator and numerator:
        return numerator / denominator
    return 0
def get_sim_speed(s: str):
    """Extract the leading integer (simulation speed) from `s`."""
    speed, _unit = drsplit(s)
    return speed
# Instrument Type
def get_instrument_type(symbol: str):
    """Return the asset class of `symbol`; currently always 'Forex'."""
    # todo future: the original special-cased ['CAD=X'] but both branches
    # returned the same value, so every symbol maps to Forex for now.
    return "Forex"
def craft_instrument_filename(sym: str, interval: str, period: str):
    """Build the canonical '<sym>-<interval>-<period>.csv' data filename."""
    return '-'.join((sym, interval, period)) + '.csv'
def get_instrument_from_filename(s: str):
    """Invert craft_instrument_filename: '<sym>-<interval>-<period>' -> (sym, interval, period).

    The symbol may itself contain hyphens; the last two segments are always
    interval and period. Returns ('', '', '') for malformed input.
    """
    parts = s.split('-')
    l = len(parts) - 2
    if l < 1:
        return "", "", ""
    period = parts[-1]
    interval = parts[-2]
    # Bug fix: the original joined parts[0:l - 1], dropping the final symbol
    # segment (and yielding '' for simple names like 'EURUSD-1M-7d').
    sym = "-".join(parts[0:l])
    return sym, interval, period
def craft_test_filename(ta_name: str, ivar_name: str, ds_names: List[str]):
    """Test name can be set by user. This is an auto-generated filename.

    Note: ds_names is currently unused; only the algo and ivar names appear.
    """
    return '-'.join((ta_name, ivar_name))
def get_size_bytes(bytes, suffix="B"):
    """
    Scale bytes to its proper format
    e.g:
        1253656 => '1.20MB'
        1253656678 => '1.17GB'
    """
    step = 1024
    for prefix in ("", "K", "M", "G", "T", "P"):
        if bytes < step:
            return f"{bytes:.2f}{prefix}{suffix}"
        bytes /= step
def to_dataname(s, interval, period):
    """Build '<sym>-<interval>-<sig period>' using the most significant period unit."""
    period_str = timedeltatosigstr(period)
    return f'{s}-{interval}-{period_str}'
def from_dataname(s: str):
    """Split '<sym>-<interval>-<period>' back into a 3-tuple; error tuple if malformed."""
    pieces = s.split('-')
    if len(pieces) < 3:
        return ('Str_Error', '', '')
    # Only the first three segments are used, matching the original.
    sym, interval, period = pieces[0], pieces[1], pieces[2]
    return (sym, interval, period)
def get_file_name(s: str):
    """Return `s` with its final extension removed."""
    root, _ext = os.path.splitext(s)
    return root
# Test
def get_test_name(s: str):
    """Strip the extension from a test filename ('name.csv' -> 'name')."""
    if s.endswith('.csv'):
        return s[0:-4]
    s_arr = s.split('.')
    if len(s_arr) == 1:
        # No extension at all.
        return s
    # Bug fix: the original joined s_arr[0:-2], chopping one component too
    # many ('a.b' -> '' and 'a.b.txt' -> 'a'); only the final extension
    # should be removed.
    return '.'.join(s_arr[0:-1])
# data = yf.download( # or pdr.get_data_yahoo(...
# # tickers list or string as well
# tickers = "SPY AAPL MSFT",
#
# # use "period" instead of start/end
# # valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
# # (optional, default is '1mo')
# period = "ytd",
#
# # fetch data by interval (including intraday if period < 60 days)
# # valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
# # (optional, default is '1d')
# interval = "1m",
#
# # group by ticker (to access via data['SPY'])
# # (optional, default is 'column')
# group_by = 'ticker',
#
# # adjust all OHLC automatically
# # (optional, default is False)
# auto_adjust = True,
#
# # download pre/post regular market hours data
# # (optional, default is False)
# prepost = True,
#
# # use threads for mass downloading? (True/False/Integer)
# # (optional, default is True)
# threads = True,
#
# # proxy URL scheme use use when downloading?
# # (optional, default is None)
# proxy = None
# )
| 3.125 | 3 |