max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
saleor/api/payment/serializers.py | glosoftgroup/ps254-backend | 0 | 12759451 | # Payment rest api serializers
from rest_framework import serializers
from rest_framework.serializers import (
SerializerMethodField,
IntegerField
)
from datetime import datetime
from ...payment.models import MpesaPayment
class MpesaPaymentUpdateSerializer(serializers.ModelSerializer):
    """Serializer used to update the status of an existing M-Pesa payment.

    Exposes only ``id``, ``ref_number`` and ``status``; ``status`` is
    constrained to 0 or 1 by the explicit field declaration below.
    """

    # Binary flag (0 or 1) — presumably pending/confirmed;
    # TODO(review): confirm what each value means against the model.
    status = IntegerField(max_value=1, min_value=0)

    class Meta:
        model = MpesaPayment
        fields = ('id',
                  'ref_number',
                  'status'
                  )

    def update(self, instance, validated_data):
        """Apply the validated fields to *instance*, save and return it.

        NOTE(review): this also copies ``id`` from the payload, which can
        rewrite the primary key of the row being updated — confirm that
        is intended rather than a copy-paste leftover.
        """
        instance.id = validated_data.get('id', instance.id)
        instance.status = validated_data.get('status', instance.status)
        instance.save()
        return instance
class MpesaPaymentListSerializer(serializers.ModelSerializer):
    """Read-only serializer for listing M-Pesa payments.

    Adds a ``time`` field containing the creation timestamp formatted
    for display (see :meth:`get_time`).
    """

    time = SerializerMethodField()

    class Meta:
        model = MpesaPayment
        fields = ('id',
                  'ref_number',
                  'phone',
                  'amount',
                  'first_name',
                  'middle_name',
                  'last_name',
                  'time',
                  'status')

    def get_time(self, obj):
        """Return ``obj.created`` formatted as dd/mm/YYYY HH:MM:SS AM/PM.

        NOTE(review): ``%H`` is 24-hour time, so the trailing ``%p``
        (AM/PM) is redundant/misleading — ``%I`` may have been intended.
        Confirm with API consumers before changing the format.
        """
        time = obj.created.strftime("%d/%m/%Y %H:%M:%S %p")
        return time
| 2.3125 | 2 |
scripts/entropy.py | 3D-e-Chem/snooker-alignment | 1 | 12759452 | <reponame>3D-e-Chem/snooker-alignment
#!/usr/bin/env python
"""
Calculate entropies of each leaf on each branch node of a tree for each column
Usage:
entropy.py -a ali.s9t100.fa -n gpcrdb_gapped_tm_numbering.csv -t ali.s9t100.ph > ali.s9t100.entropies
"""
import argparse
import collections
import logging
import math
from Bio import Phylo, AlignIO
import snooker
def calculate_entropies(tree_file, alignment_file, numbering_file,
                        min_node_size, max_node_size, number_format):
    """Print per-column Shannon entropies for each internal tree node, as CSV.

    For every internal node of the newick tree whose number of leaves lies in
    [min_node_size, max_node_size], and for every alignment column that has a
    generic numbering, computes the amino-acid Shannon entropy of the sequences
    inside the node and of those outside it, plus a combined score and the
    number of distinct residues on each side. Output (CSV, one header row)
    goes to stdout.

    tree_file: newick tree of the alignment (file or handle)
    alignment_file: multiple sequence alignment in FASTA format
    numbering_file: CSV mapping alignment positions to a numbering scheme
    min_node_size / max_node_size: inclusive leaf-count bounds for nodes
    number_format: numbering column (from the numbering file) to report
    """
    numberings = snooker.Numberings.from_file(numbering_file)
    ali2gpcrdb = numberings.lookup(snooker.ALIGNMENT_POSITION, number_format)
    alignment = AlignIO.read(alignment_file, 'fasta')
    id2seq = {row.id: row.seq for row in alignment}
    tree = Phylo.read(tree_file, 'newick')
    all_leafs = set([leaf.name for leaf in tree.get_terminals()])
    # for each column determine the aa distribution
    # (columns are 1-based in the numbering, hence seq[col - 1])
    all_counters = {}
    for col in ali2gpcrdb:
        all_counters[col] = collections.Counter([seq[col - 1] for seq in id2seq.values()])
    print('{},{},{},{},{},{},{},{}'.format('node_id',
                                           'alignment_pos',
                                           number_format,
                                           'entropy_inside',
                                           'entropy_outside',
                                           'score',
                                           'variability_inside',
                                           'variability_outside',
                                           ))
    for node_id, node in enumerate(tree.get_nonterminals()):
        leafs_of_node = set([leaf.name for leaf in node.get_terminals()])
        if not (min_node_size <= len(leafs_of_node) <= max_node_size):
            msg = '{} has {} leafs, skipping'.format(node, len(leafs_of_node))
            logging.info(msg)
            continue
        leafs_outside_node = all_leafs - leafs_of_node
        seqs_inside = [id2seq[v] for v in leafs_of_node]
        nr_inside = float(len(leafs_of_node))
        nr_outside = float(len(leafs_outside_node))
        # loop over columns
        for col in ali2gpcrdb:
            # Shannon entropy H = -sum(p * ln p) over residue frequencies
            aa_inside = collections.Counter([seq[col - 1] for seq in seqs_inside])
            f_i_inside = 0
            for count in aa_inside.values():
                f_i_inside += count / nr_inside * math.log(count / nr_inside)
            entropy_inside = -1 * f_i_inside
            variability_inside = len(aa_inside)
            # outside counts = column totals minus the inside counts
            aa_outside = all_counters[col] - aa_inside
            f_i_outside = 0
            for aa, count in aa_outside.items():
                f_i_outside += count / nr_outside * math.log(count / nr_outside)
            entropy_outside = -1 * f_i_outside
            variability_outside = len(aa_outside)
            distinct_aa = 21  # all amino acids and gap (-)
            # Euclidean distance to the ideal point: maximal entropy outside
            # (ln 21, fully variable) and zero entropy inside (fully conserved)
            score = math.sqrt(pow(abs(math.log(1.0 / distinct_aa)) - entropy_outside, 2)
                              + pow(entropy_inside, 2))
            print('{},{},{},{},{},{},{},{}'.format(node_id,
                                                   col,
                                                   ali2gpcrdb[col],
                                                   entropy_inside,
                                                   entropy_outside,
                                                   score,
                                                   variability_inside,
                                                   variability_outside,
                                                   ))
def _parse_args():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser(description='Calculate entropies of each leaf on each branch node of a tree for each column')
    parser.add_argument('-a', '--alignment', type=argparse.FileType('r'), required=True, help='Multiple sequence alignment (fasta format)')
    parser.add_argument('-n', '--numbering', type=argparse.FileType('r'), required=True, help='Numbering file, translate sequence alignment position into generic numbering scheme')
    parser.add_argument('-t', '--tree', type=argparse.FileType('r'), required=True, help='Tree of multiple sequence alignment (newick format)')
    parser.add_argument('--min_node_size', type=int, default=20, help='Calculate entropies for nodes with a minimum number of leafs')
    parser.add_argument('--max_node_size', type=int, default=20, help='Calculate entropies for nodes with a maximum number of leafs')
    parser.add_argument('--number_format', default='gpcrdb_alignment', help='Column from numbering file to include in output')
    return parser.parse_args()


if __name__ == '__main__':
    # Guarded entry point: previously the argument parsing and the whole
    # computation ran unconditionally at import time, which made the module
    # impossible to import (e.g. for testing) without valid CLI arguments.
    args = _parse_args()
    calculate_entropies(args.tree, args.alignment, args.numbering,
                        args.min_node_size, args.max_node_size, args.number_format)
| 2.25 | 2 |
fetchr/__init__.py | fetch-r/py-sdk | 0 | 12759453 | <filename>fetchr/__init__.py
from .fetchr_client import FetchRClient
from .fetchr_client import FetchRException
| 1.242188 | 1 |
sort_ics.py | martinp26/ics-tools | 5 | 12759454 | #!/usr/bin/python
from __future__ import print_function
from icalendar import Calendar
import sys
def get_key_value(a):
    """Return the sort key for a calendar component.

    Prefers UID, then DTSTART, then DESCRIPTION, then SUMMARY, mirroring the
    original branch order; returns "" when none of those properties exist.

    Fixes: ``dict.has_key()`` was removed in Python 3 (components are dict
    subclasses), so membership now uses ``in``; the duplicated SUMMARY
    branch has been dropped.
    """
    for prop in ("UID", "DTSTART", "DESCRIPTION", "SUMMARY"):
        if prop in a:
            return a[prop]
    return ""
if len(sys.argv) < 3:
    print("Usage: sort_ics.py in.ics out.ics")
    sys.exit(1)

# Read the calendar, sort its components by the key above, and write it back.
# `with` blocks close both file handles deterministically (the original left
# the input handle to the garbage collector and closed the output manually).
with open(sys.argv[1], 'rb') as infile:
    cal = Calendar.from_ical(infile.read())
cal.subcomponents.sort(key=get_key_value)
with open(sys.argv[2], 'wb') as outfile:
    outfile.write(cal.to_ical())
| 2.859375 | 3 |
utils/loss.py | gjy3035/WSAL_released | 22 | 12759455 | <gh_stars>10-100
import torch.nn.functional as F
from torch import nn
import pdb
class CrossEntropyLoss2d(nn.Module):
    """2D cross-entropy: log-softmax over the channel dim followed by NLL.

    Takes (N, C, H, W) logits and (N, H, W) integer class targets —
    equivalent to ``nn.CrossEntropyLoss`` on dense prediction maps.
    """

    def __init__(self, weight=None, size_average=True):
        """weight: optional per-class rescaling tensor of size C.
        size_average: average (True) vs. sum (False) the per-pixel losses."""
        super(CrossEntropyLoss2d, self).__init__()
        # nn.NLLLoss2d was deprecated and later removed from PyTorch;
        # nn.NLLLoss accepts (N, C, H, W) inputs with identical semantics.
        self.nll_loss = nn.NLLLoss(weight, size_average)

    def forward(self, inputs, targets):
        # dim=1 made explicit: implicit-dim log_softmax is deprecated, and
        # for 4D inputs the legacy default was dim=1, so behavior is unchanged.
        return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
# TODO: fix bugs
class BCELoss2d(nn.Module):
    """Binary cross-entropy over (N, C, H, W) predictions, optionally
    masking out pixels whose target equals ``ignored_label``.

    Fix: a leftover ``pdb.set_trace()`` debugger breakpoint was removed from
    the no-ignore path — it froze any non-interactive run (training, CI).

    NOTE(review): this class is still known-buggy (hence the TODO above):
    the no-ignore path reshapes inputs to (N*H*W, C) against flat targets,
    which only lines up for C == 1, and ``useful_idx.repeat(1, 1, 1, c)``
    assumes a 4-D target mask. Confirm the intended target layout before
    relying on it.
    """

    def __init__(self, ignored_label=None, size_average=True):
        super(BCELoss2d, self).__init__()
        self.ignored_label = ignored_label  # target value to exclude, or None
        self.size_average = size_average
        self.weight = None  # optional per-element weights (always None here)

    def forward(self, inputs, targets):
        n, c, h, w = inputs.size()
        # (N, C, H, W) -> (N, H, W, C) so each pixel's channels are contiguous
        inputs = inputs.transpose(1, 2).transpose(2, 3).contiguous()
        if self.ignored_label is None:
            inputs = inputs.view(-1, c)
            targets = targets.view(-1)
        else:
            # keep only positions whose target is not the ignored label
            useful_idx = targets != self.ignored_label
            inputs = inputs[useful_idx.repeat(1, 1, 1, c)].view(-1, c)
            targets = targets[useful_idx].view(-1)
        return F.binary_cross_entropy(inputs, targets, self.weight, self.size_average)
class BCELoss2d_2(nn.Module):
    """BCE-with-logits applied to log-softmax'ed inputs.

    NOTE(review): feeding log-probabilities into BCEWithLogitsLoss (which
    applies its own sigmoid internally) is an unusual stacking — confirm
    this is intended rather than passing the raw logits straight through.
    """

    def __init__(self, weight=None, size_average=True):
        super(BCELoss2d_2, self).__init__()
        self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(weight, size_average)

    def forward(self, inputs, targets):
        # dim=1 made explicit (the legacy implicit default for 4D inputs);
        # implicit-dim log_softmax is deprecated. Behavior is unchanged.
        return self.BCEWithLogitsLoss(F.log_softmax(inputs, dim=1), targets)
angular_scaffold/management/commands/helpers/_generate_view.py | juanfe/django-angular-scaffold | 4 | 12759456 | <gh_stars>1-10
import os
def _touch(fname):
    """Ensure *fname* exists: bump its mtime, creating it empty if needed."""
    try:
        os.utime(fname, None)
    except Exception:
        # utime fails when the file does not exist yet — create it.
        with open(fname, 'a'):
            pass
    return fname
def _build(path, pwd=None):
    """Recursively create the directory components of *path* under *pwd*,
    touching the final component as a file.

    *path* is a list of path components and is consumed (popped) as the
    recursion descends. Returns the touched file path at the deepest level;
    intermediate recursive frames return None, as before.
    """
    current = path.pop(0)
    here = os.path.join(pwd, current) if pwd else current
    if not path:
        # Last component: it is the file itself.
        return _touch(here)
    if not os.path.exists(here):
        os.makedirs(here)
    _build(path, here)
def generate_view(directory, name=None):
    """Scaffold an Angular view: create its HTML template and SCSS partial
    under *directory*, and register the partial in styles.scss.

    If *name* is not given, prompt for it interactively. Existing files
    are left untouched (a message is printed instead).

    Fixes: Python-2-only ``print`` statements converted to the function
    form (valid on both 2 and 3), and ``raw_input`` guarded so the prompt
    also works on Python 3.
    """
    if not name:
        # raw_input() only exists on Python 2; fall back to input() on 3.
        try:
            name = raw_input('View Name: ')
        except NameError:
            name = input('View Name: ')
    view = os.path.join("assets", "app", "views", name + ".html")
    split = name.split(os.sep)
    namespace = '-'.join(split)
    filename = split[-1]
    split[-1] = "_" + filename + ".scss"
    style = os.path.join("assets", "lib", "styles", "site", os.sep.join(split))
    # view html file
    if not os.path.exists(os.path.join(directory, view)):
        _build(view.split(os.sep), directory)
        with open(os.path.join(directory, view), 'w') as f:
            f.write("<div class='page %s'>\n\n</div>" % namespace)
    else:
        print("View Template Already Exists: %s" % namespace)
    # styles file
    if not os.path.exists(os.path.join(directory, style)):
        _build(style.split(os.sep), directory)
        with open(os.path.join(directory, style), 'w') as f:
            f.write(".page.%s{\n\n}" % namespace)
    else:
        print("View Styles Already Exists: %s" % style)
    # register the new partial in the site-wide stylesheet
    with open(os.path.join(directory, 'assets', 'lib', 'styles', 'styles.scss'), 'a') as styles:
        styles.write('\n@import "site/%s";' % name)
tests/cases/sealed.py | MiguelMarcelino/py2many | 2 | 12759457 | <gh_stars>1-10
#!/usr/bin/env python3
from adt import adt as sealed
from dataclasses import dataclass
@dataclass
class Packet:
    """Payload record carried by the Register.PACKET variant below."""
    val: float  # packet value
@sealed
class Register:
    """Sealed algebraic data type (via the third-party ``adt`` package):
    a Register is either PACKET (carrying a Packet) or VALUE (an int)."""
    PACKET: Packet
    VALUE: int
if __name__ == "__main__":
a = Register.VALUE(10)
print(a)
assert a.is_value()
a.value()
# assert a.value() == 10
b = Register.PACKET(Packet(1.3))
assert b.is_packet()
# assert b.packet().val == 1.3
b.packet()
print("OK")
| 2.96875 | 3 |
noncloud/python/maps/main.py | omarxs2/cloud-nebulous-serverless | 4 | 12759458 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, render_template, request
import googlemaps
from settings import API_KEY
app = Flask(__name__)
GMAPS = googlemaps.Client(key=API_KEY)
ADDRESS = '1600 Amphitheatre Pkwy 94043'
@app.route('/', methods=['GET', 'POST'])
def mapsgeo(gcf_request=None):
    """
    Main handler: render the address form and, on POST, geocode the
    submitted address via the Google Maps client and show the results.

    ``gcf_request`` lets the same handler run as a Cloud Function, where
    a request object is passed in instead of Flask's global ``request``.
    """
    # Flask Request object passed in for Cloud Functions
    # (use gcf_request for GCF but flask.request otherwise)
    local_request = gcf_request if gcf_request else request

    # reset all variables (GET): default demo address, no results
    address = ADDRESS
    results = []

    # form submission and if there is data to process (POST)
    if local_request.method == 'POST':
        address = local_request.form['address'].strip()
        if not address:
            # empty submission falls back to the demo address
            address = ADDRESS
        rsp = GMAPS.geocode(address)
        if rsp:
            # each match yields a formatted address plus "lat, lng"
            for data in rsp:
                if 'geometry' in data and 'location' in data['geometry']:
                    geocode = data['geometry']['location']
                    results.append({
                        'full_addr': data['formatted_address'],
                        'latlong': '%s, %s' % (geocode['lat'], geocode['lng']),
                    })

    # create context & render template
    context = {'address': address, 'results': results}
    return render_template('index.html', **context)
if __name__ == '__main__':
    # Local development entry point; hosting platforms set PORT for us.
    # debug=True is for development only — do not deploy with it enabled.
    import os
    app.run(debug=True, threaded=True, host='0.0.0.0',
            port=int(os.environ.get('PORT', 8080)))
| 2.640625 | 3 |
omaha_server/crash/tests/test_serializers.py | makar21/omaha-server | 142 | 12759459 | <reponame>makar21/omaha-server
# coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""
from builtins import str
import os
from django.test import TestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from omaha.tests.utils import temporary_media_root
from crash.models import Symbols, Crash
from crash.serializers import SymbolsSerializer, CrashSerializer
BASE_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(BASE_DIR, 'testdata')
SYM_FILE = os.path.join(TEST_DATA_DIR, 'BreakpadTestApp.sym')
class SymbolsSerializerTest(TestCase):
    """Tests for SymbolsSerializer: round-trip of a Symbols row and
    automatic extraction of debug metadata from an uploaded .sym file."""

    def test_serializer(self):
        """Serializing a Symbols instance yields exactly the expected dict."""
        # NOTE(review): SimpleUploadedFile normally takes bytes content;
        # `False` here is suspicious — confirm it is intentional.
        data = dict(file=SimpleUploadedFile('./test.pdb', False),
                    debug_id='C1C0FA629EAA4B4D9DD2ADE270A231CC1',
                    debug_file='BreakpadTestApp.pdb')
        symbols = Symbols.objects.create(**data)
        self.assertDictEqual(SymbolsSerializer(symbols).data,
                             dict(id=symbols.id,
                                  debug_id='C1C0FA629EAA4B4D9DD2ADE270A231CC1',
                                  debug_file='BreakpadTestApp.pdb',
                                  file=symbols.file.url,
                                  file_size=symbols.file_size,
                                  created=symbols.created.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
                                  modified=symbols.modified.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), ))

    @temporary_media_root(MEDIA_URL='http://cache.pack.google.com/edgedl/chrome/install/782.112/')
    def test_auto_fill_file_size(self):
        """Deserializing a real Breakpad .sym upload auto-fills debug_id,
        debug_file and file_size from the file contents."""
        with open(SYM_FILE, 'rb') as f:
            data = dict(file=SimpleUploadedFile('./BreakpadTestApp.sym', f.read()))
        symbols = SymbolsSerializer(data=data)
        self.assertTrue(symbols.is_valid())
        symbols_instance = symbols.save()
        self.assertEqual(symbols_instance.debug_id, 'C1C0FA629EAA4B4D9DD2ADE270A231CC1')
        self.assertEqual(symbols_instance.debug_file, 'BreakpadTestApp.pdb')
        self.assertEqual(symbols_instance.file_size, 68149)
class CrashSerializerTest(TestCase):
    """Tests for CrashSerializer: a freshly created Crash serializes to
    exactly the expected field set."""

    maxDiff = None  # show full dict diffs on assertion failure

    @temporary_media_root(
        # run Celery tasks lazily so saving the Crash does not kick off
        # asynchronous processing during the test
        CELERY_ALWAYS_EAGER=False,
        CELERY_EAGER_PROPAGATES_EXCEPTIONS=False,
    )
    def test_serializer(self):
        meta = dict(
            lang='en',
            version='1.0.0.1',
        )
        stacktrace_json = dict(
            crashing_thread={},
        )
        app_id = '{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}'
        user_id = '{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}'
        crash = Crash.objects.create(
            appid=app_id,
            userid=user_id,
            upload_file_minidump=SimpleUploadedFile('./dump.dat', b''),
            meta=meta,
            stacktrace_json=stacktrace_json
        )
        self.assertDictEqual(CrashSerializer(crash).data,
                             dict(id=crash.id,
                                  upload_file_minidump=crash.upload_file_minidump.url,
                                  archive=None,
                                  appid=str(crash.appid),
                                  userid=str(crash.userid),
                                  meta=meta,
                                  signature=crash.signature,
                                  stacktrace_json=crash.stacktrace_json,
                                  created=crash.created.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
                                  modified=crash.modified.strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
                                  os=None,
                                  build_number=None,
                                  channel=''))
| 2.09375 | 2 |
22Braket/Braket.py | Easonyesheng/CodePractice | 0 | 12759460 | """
数字 n 代表生成括号的对数,请你设计一个函数,用于能够生成所有可能的并且 有效的 括号组合。
示例:
输入:n = 3
输出:[
"((()))",
"(()())",
"(())()",
"()(())",
"()()()"
]
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/generate-parentheses
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
""" | 3.40625 | 3 |
main.py | Tall1n/ipfs | 0 | 12759461 | from pathlib import Path
from subprocess import run
def add_file(file_name):
    """Add *file_name* to IPFS (quiet mode) and return a one-entry dict
    mapping the resulting IPFS hash to the file's absolute path."""
    resolved_path = Path(file_name).resolve()
    proc = run(["ipfs", "add", "-Q", resolved_path],
               capture_output=True, text=True)
    return {proc.stdout.strip(): resolved_path}
def find_file_locations(ipfs_hash):
    """Query the IPFS DHT for peers providing *ipfs_hash*; return the raw
    (stripped) stdout of `ipfs dht findprovs`, one provider per line."""
    proc = run(["ipfs", "dht", "findprovs", ipfs_hash],
               capture_output=True, text=True)
    return proc.stdout.strip()
if __name__ == '__main__':
    # Demo: add a local file, then look up which peers provide it.
    example_file_name = "hello"
    file_added = add_file(example_file_name)
    # unpack the single {hash: path} entry returned by add_file
    ipfs_hash_val, file_path = [(k, v) for k, v in file_added.items()][0]
    file_added = find_file_locations(ipfs_hash_val)
    # one provider peer ID per line (result currently unused)
    file_added.splitlines()
| 3.078125 | 3 |
Hackerearth/The_minionGame.py | Shaswat-2203/HacktoberfestForBeginners | 115 | 12759462 |
def minion_game(string):
    """Score HackerRank's Minion Game and print the winner.

    Kevin scores one point per substring starting with a vowel, Stuart one
    per substring starting with a consonant; a substring starting at index
    i contributes len(string) - i points. Prints "Kevin <n>", "Stuart <n>"
    or "Draw".
    """
    n = len(string)
    vowels = "AEIOU"
    kevin = sum(n - i for i, ch in enumerate(string) if ch in vowels)
    stuart = sum(n - i for i, ch in enumerate(string) if ch not in vowels)
    if kevin > stuart:
        print("Kevin %d" % kevin)
    elif kevin < stuart:
        print("Stuart %d" % stuart)
    else:
        print("Draw")
| 3.734375 | 4 |
runserver.py | mcherniak/wintermute | 1 | 12759463 | import os
from wintermute import APP
def runserver():
    """Start the Flask dev server on $PORT (default 5000), on all interfaces.

    debug=True enables the reloader/debugger — development use only.
    """
    port = int(os.environ.get('PORT', 5000))
    APP.run(host='0.0.0.0', port=port, debug=True)
if __name__ == '__main__':
    # CLI entry point: `python runserver.py`
    runserver()
| 1.875 | 2 |
inventorycalculator/handlers.py | 1T/InventoryCalculator | 0 | 12759464 | from typing import Dict, Any
from uuid import uuid4
from inventorycalculator.core.loaders.file_loader import FileLoader
from inventorycalculator.core.parsers.inventory_parser import InventoryParser
from inventorycalculator.core.repositories.dynamodb import DynamoDBTable
from inventorycalculator.core.storages.s3_storage import S3Storage
from inventorycalculator.core.workers.aws_lambda import AwsLambda
from inventorycalculator.errors import S3StorageError, DynamoDBError, AsyncWorkerError, InvalidInventoryDataFormatError
from inventorycalculator.settings import S3_BUCKET, TABLE_NAME, STATUSES, ASYNC_WORKER
from OneTicketLogging import elasticsearch_logger
_logger = elasticsearch_logger(__name__)
file_loader = FileLoader()
storage = S3Storage(S3_BUCKET)
db_table = DynamoDBTable(TABLE_NAME)
async_worker = AwsLambda(ASYNC_WORKER)
inventory_parser = InventoryParser()
def crawl_job_handler(event: Dict[str, Any], _: Any) -> Dict:
    """Creates inventory calculator job for async processing.

    Downloads the inventory file from ``event['url']``, stores it in S3
    under a fresh job id, invokes the async worker Lambda for that job,
    and records the job as RUNNING (total_value 0) in DynamoDB.
    Returns ``{'job_id': <uuid4>}``.
    """
    _logger.info(event)
    file_content = file_loader.by_url(event['url'])
    job_id = str(uuid4())
    job = {'job_id': job_id}
    storage.upload(job_id, file_content)
    # NOTE(review): the worker is invoked before the RUNNING row is written —
    # a fast worker could race this put. Confirm the ordering is acceptable.
    async_worker.async_invoke(job)
    db_table.put({
        **job,
        'status': STATUSES.RUNNING,
        'total_value': 0
    })
    return job
def async_worker_handler(event: Dict[str, Any], _: Any):
    """Process the tickets for one job.

    Loads the uploaded inventory TSV from S3 for ``event['job_id']``, sums
    the ticket values, and marks the job SUCCEEDED with the total. On any
    known failure the job row is marked FAILED and AsyncWorkerError is
    raised so the Lambda invocation itself is reported as failed.
    """
    _logger.info(event)
    job_id = event.get('job_id')
    try:
        # ensure the job row exists before doing any work
        db_table.get(job_id)
        tickets = inventory_parser.from_tsv(storage.get(job_id))
        total_value = sum([ticket.value for ticket in tickets])
        db_table.put({
            'job_id': job_id,
            'status': STATUSES.SUCCEEDED,
            'total_value': total_value
        })
    except (S3StorageError, DynamoDBError, InvalidInventoryDataFormatError) as e:
        _logger.error(e)
        # NOTE(review): the FAILED row omits 'total_value'; readers must not
        # assume that key is always present.
        db_table.put({
            'job_id': job_id,
            'status': STATUSES.FAILED
        })
        raise AsyncWorkerError(f'Unable to proceed job with "job_id":{job_id}')
def status_check_handler(event: Dict[str, Any], _: Any) -> Dict:
    """Check the status of tickets processing.

    Looks up the job row for ``event['job_id']`` and returns
    ``{'status': ..., 'total_value': ...}``.

    Fix: FAILED job rows are written without a 'total_value' key, so the
    previous direct indexing raised KeyError (crashing the status endpoint)
    for any failed job; ``.get()`` now yields None in that case.
    """
    _logger.info(event)
    payload = db_table.get(event['job_id'])
    return {
        'status': payload['status'],
        'total_value': payload.get('total_value')
    }
| 2.03125 | 2 |
detection_example.py | frying-apple/CS_T0828_HW2 | 0 | 12759465 | <filename>detection_example.py<gh_stars>0
import json
import cv2
from yolo.backend.utils.box import draw_scaled_boxes
import os
import yolo
from yolo.frontend import create_yolo
import numpy as np
# 1. create yolo instance
# digit detector: one class per digit 0-9, 416x416 input, ResNet50 backbone
yolo_detector = create_yolo("ResNet50", ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"], 416)

# 2. load pretrained weighted file
# Pretrained weight file is at https://drive.google.com/drive/folders/1Lg3eAPC39G9GwVTCH3XzF73Eok-N-dER
DEFAULT_WEIGHT_FILE = os.path.join(yolo.PROJECT_ROOT, "weights.h5")
yolo_detector.load_weights(DEFAULT_WEIGHT_FILE)

# 3. Load images (BGR -> RGB, since OpenCV loads BGR) and display them
import os
import matplotlib.pyplot as plt
DEFAULT_IMAGE_FOLDER = os.path.join(yolo.PROJECT_ROOT, "tests", "dataset", "svhn", "imgs")
img_files = [os.path.join(DEFAULT_IMAGE_FOLDER, "1.png"), os.path.join(DEFAULT_IMAGE_FOLDER, "2.png")]
imgs = []
for fname in img_files:
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    imgs.append(img)
    plt.figure()
    plt.imshow(img)
    plt.show()

# 4. Predict digit region (keep detections with confidence >= THRESHOLD)
THRESHOLD = 0.3
for img in imgs:
    boxes, probs = yolo_detector.predict(img, THRESHOLD)
    # 4. save detection result: draw the scaled boxes with class labels
    image = draw_scaled_boxes(img,
                              boxes,
                              probs,
                              ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"])
    print("{}-boxes are detected.".format(len(boxes)))
    plt.figure()
    plt.imshow(image)
    plt.show()
# readme:
# clone https://github.com/penny4860/Yolo-digit-detector
# copy/paste detection_example.ipynb to here
# install stuff; must use h5py<3, or else: https://stackoverflow.com/questions/53740577/does-any-one-got-attributeerror-str-object-has-no-attribute-decode-whi
# install CUDA 10.0
# https://www.tensorflow.org/install/source#gpu
# put test images into DEFAULT_IMAGE_FOLDER

# Disabled demo of the expected submission JSON shape (kept for reference):
'''
print(json.dumps([{'a':1.0, 'b':2.0}, {'c':'hi', 'd':'bye'}]))
HW2_dict = {"bbox":[1,2,3,4], "score":[0.1,0.5], "label":[4,6]}
HW2_list_of_dicts = []
HW2_list_of_dicts.append(HW2_dict) # test image 1
# ...
HW2_list_of_dicts.append(HW2_dict) # ... test image 13068
print(json.dumps(HW2_list_of_dicts))
with open('test.json','w') as f:
    json.dump(HW2_list_of_dicts, f)
'''
def to_json_list(json_items, bbox, score):
    """Append one image's detections to *json_items* and return it.

    json_items: accumulator list of per-image result dicts
    bbox: (N, 4) array of boxes in (x1, y1, x2, y2) order
    score: (N, 10) array of per-digit-class confidences

    Each appended dict has plain-Python values:
      "bbox"  -> list of (y1, x1, y2, x2) tuples
      "score" -> list of max confidences
      "label" -> list of argmax digit classes (0-9)
    """
    digit_labels = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    # swap coordinate pairs: (x1, y1, x2, y2) -> (y1, x1, y2, x2)
    reorder = np.array([1, 0, 3, 2])
    boxes = [tuple(box[reorder].tolist()) for box in bbox]
    confidences = [np.max(cls_scores).tolist() for cls_scores in score]
    digits = [digit_labels[np.argmax(cls_scores)].tolist() for cls_scores in score]
    json_items.append({"bbox": boxes, "score": confidences, "label": digits})
    return json_items
def to_json(json_items):
    """Write all accumulated detections to the submission file
    (named after the student id), overwriting any previous run."""
    with open('0560841.json', 'w') as f:
        json.dump(json_items, f)
# test json functions: run the two demo images through the detector and
# serialize the result (note: this overwrites the submission file).
json_items = []
for img in imgs:
    boxes, probs = yolo_detector.predict(img, THRESHOLD)
    json_items = to_json_list(json_items, boxes, probs)
to_json(json_items)
def sort_function(x):
    """Numeric sort key for image paths: '.../1234.png' -> 1234.

    Splitting on os.sep then '.' extracts the bare file number so that
    e.g. '2.png' sorts before '10.png'.
    """
    stem = x.split(os.sep)[-1].split('.')[0]
    return int(stem)
# using pretrained model from above
# change dir to the full test set
DEFAULT_IMAGE_FOLDER = os.path.join(yolo.PROJECT_ROOT, "tests", "dataset", "svhn", "imgs_all", "test")

# get files, sorted numerically by filename (so 2.png precedes 10.png)
fn_list = []
for file in os.listdir(DEFAULT_IMAGE_FOLDER):  # DONE: sort to alphabetical
    if file.endswith(".png"):
        fn = os.path.join(DEFAULT_IMAGE_FOLDER, file)
        fn_list.append(fn)
fn_list = sorted(fn_list, key=sort_function)

# read files into list (BGR -> RGB, as the detector expects)
imgs = []  # overwrite above
for fn in fn_list:
    img = cv2.imread(fn)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    imgs.append(img)

# predict every test image and save the submission .json
print('predicting...')
json_items = []
count = 0
for img, fn in zip(imgs, fn_list):
    boxes, probs = yolo_detector.predict(img, THRESHOLD)
    json_items = to_json_list(json_items, boxes, probs)
    count += 1
    print('predicted img:', count, ' : ', fn)
to_json(json_items)
print('-- done')

# DONE: put images into DEFAULT_IMAGE_FOLDER
# install tensorflow-gpu==1.14.0
# TODO: time the prediction loop, e.g.
# https://docs.python.org/3/library/timeit.html#timeit-examples
# or
#
# import time
# t0 = time.perf_counter()
# ~~~code blob~~~
# t1 = time.perf_counter()
# print(t1-t0)
Models/attention.py | asdf2kr/BAM-CBAM-Pytorch- | 26 | 12759466 | <reponame>asdf2kr/BAM-CBAM-Pytorch-
import torch
import torch.nn as nn
from Models.conv import conv1x1, conv3x3, conv7x7
class BAM(nn.Module):
def __init__(self, in_channel, reduction_ratio, dilation):
super(BAM, self).__init__()
self.hid_channel = in_channel // reduction_ratio
self.dilation = dilation
self.globalAvgPool = nn.AdaptiveAvgPool2d(1)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
self.fc1 = nn.Linear(in_features=in_channel, out_features=self.hid_channel)
self.bn1_1d = nn.BatchNorm1d(self.hid_channel)
self.fc2 = nn.Linear(in_features=self.hid_channel, out_features=in_channel)
self.bn2_1d = nn.BatchNorm1d(in_channel)
self.conv1 = conv1x1(in_channel, self.hid_channel)
self.bn1_2d = nn.BatchNorm2d(self.hid_channel)
self.conv2 = conv3x3(self.hid_channel, self.hid_channel, stride=1, padding=self.dilation, dilation=self.dilation)
self.bn2_2d = nn.BatchNorm2d(self.hid_channel)
self.conv3 = conv3x3(self.hid_channel, self.hid_channel, stride=1, padding=self.dilation, dilation=self.dilation)
self.bn3_2d = nn.BatchNorm2d(self.hid_channel)
self.conv4 = conv1x1(self.hid_channel, 1)
self.bn4_2d = nn.BatchNorm2d(1)
def forward(self, x):
# Channel attention
Mc = self.globalAvgPool(x)
Mc = Mc.view(Mc.size(0), -1)
Mc = self.fc1(Mc)
Mc = self.bn1_1d(Mc)
Mc = self.relu(Mc)
Mc = self.fc2(Mc)
Mc = self.bn2_1d(Mc)
Mc = self.relu(Mc)
Mc = Mc.view(Mc.size(0), Mc.size(1), 1, 1)
# Spatial attention
Ms = self.conv1(x)
Ms = self.bn1_2d(Ms)
Ms = self.relu(Ms)
Ms = self.conv2(Ms)
Ms = self.bn2_2d(Ms)
Ms = self.relu(Ms)
Ms = self.conv3(Ms)
Ms = self.bn3_2d(Ms)
Ms = self.relu(Ms)
Ms = self.conv4(Ms)
Ms = self.bn4_2d(Ms)
Ms = self.relu(Ms)
Ms = Ms.view(x.size(0), 1, x.size(2), x.size(3))
Mf = 1 + self.sigmoid(Mc * Ms)
return x * Mf
#To-do:
class CBAM(nn.Module):
def __init__(self, in_channel, reduction_ratio, dilation=1):
super(CBAM, self).__init__()
self.hid_channel = in_channel // reduction_ratio
self.dilation = dilation
self.globalAvgPool = nn.AdaptiveAvgPool2d(1)
self.globalMaxPool = nn.AdaptiveMaxPool2d(1)
# Shared MLP.
self.mlp = nn.Sequential(
nn.Linear(in_features=in_channel, out_features=self.hid_channel),
nn.ReLU(),
nn.Linear(in_features=self.hid_channel, out_features=in_channel)
)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
self.conv1 = conv7x7(2, 1, stride=1, dilation=self.dilation)
def forward(self, x):
''' Channel attention '''
avgOut = self.globalAvgPool(x)
avgOut = avgOut.view(avgOut.size(0), -1)
avgOut = self.mlp(avgOut)
maxOut = self.globalMaxPool(x)
maxOut = maxOut.view(maxOut.size(0), -1)
maxOut = self.mlp(maxOut)
# sigmoid(MLP(AvgPool(F)) + MLP(MaxPool(F)))
Mc = self.sigmoid(avgOut + maxOut)
Mc = Mc.view(Mc.size(0), Mc.size(1), 1, 1)
Mf1 = Mc * x
''' Spatial attention. '''
# sigmoid(conv7x7( [AvgPool(F); MaxPool(F)]))
maxOut = torch.max(Mf1, 1)[0].unsqueeze(1)
avgOut = torch.mean(Mf1, 1).unsqueeze(1)
Ms = torch.cat((maxOut, avgOut), dim=1)
Ms = self.conv1(Ms)
Ms = self.sigmoid(Ms)
Ms = Ms.view(Ms.size(0), 1, Ms.size(2), Ms.size(3))
Mf2 = Ms * Mf1
return Mf2
| 2.4375 | 2 |
examples/futaba/futaba_burst_target_positions.py | karakuri-products/gs2d-python | 7 | 12759467 | <reponame>karakuri-products/gs2d-python
#! /usr/bin/env python3
# encoding: utf-8
import sys
import time
import logging
sys.path.insert(0, '../..')
from gs2d import SerialInterface, Futaba
# ログ設定
logging.basicConfig()
logging.getLogger('gs2d').setLevel(level=logging.DEBUG)
try:
# 初期化
si = SerialInterface()
futaba = Futaba(si)
# バーストトルクON
# enable: 1
sid_data = {
1: [1]
}
# Length: サーボ一つ分のデータ(VID+Data)のバイト数を指定。
# Length = VID(1) + Data(1) = 2
futaba.burst_write(Futaba.ADDR_TORQUE_ENABLE, 2, sid_data)
# 色んな角度にバースト設定
for position_degree in [0, 50, 0, -50, 0]:
# バーストポジション設定
sid_positions = {
# サーボID: ポジション
1: position_degree
}
futaba.set_burst_target_positions(sid_positions)
# 1秒待機
time.sleep(1.0)
# クローズ
futaba.close()
si.close()
except Exception as e:
print('Error', e)
| 2.109375 | 2 |
pype/hosts/harmony/plugins/publish/collect_palettes.py | simonebarbieri/pype | 44 | 12759468 | <reponame>simonebarbieri/pype<filename>pype/hosts/harmony/plugins/publish/collect_palettes.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""Collect palettes from Harmony."""
import os
import json
import re
import pyblish.api
from avalon import harmony
class CollectPalettes(pyblish.api.ContextPlugin):
"""Gather palettes from scene when publishing templates."""
label = "Palettes"
order = pyblish.api.CollectorOrder + 0.003
hosts = ["harmony"]
# list of regexes for task names where collecting should happen
allowed_tasks = []
def process(self, context):
"""Collector entry point."""
self_name = self.__class__.__name__
palettes = harmony.send(
{
"function": f"PypeHarmony.Publish.{self_name}.getPalettes",
})["result"]
# skip collecting if not in allowed task
if self.allowed_tasks:
task_name = context.data["anatomyData"]["task"].lower()
if (not any([re.search(pattern, task_name)
for pattern in self.allowed_tasks])):
return
for name, id in palettes.items():
instance = context.create_instance(name)
instance.data.update({
"id": id,
"family": "harmony.palette",
'families': [],
"asset": os.environ["AVALON_ASSET"],
"subset": "{}{}".format("palette", name)
})
self.log.info(
"Created instance:\n" + json.dumps(
instance.data, sort_keys=True, indent=4
)
)
| 2.1875 | 2 |
api/api/conf.py | sykefi/PRTR | 1 | 12759469 | from dataclasses import dataclass
@dataclass(frozen=True)
class Conf():
api_title: str
api_description: str
facilities_csv_fp: str
releases_csv_fp: str
waste_transfers_csv_fp: str
api_version: str
conf = Conf(
api_title='FIN-PRTR',
api_description=(
'The European Pollutant Release and Transfer Register (E-PRTR) '
'data published as a national web service '
'(serving only Finnish PRTR data).'
),
api_version='v1',
facilities_csv_fp=r'api/assets/facilities.csv',
releases_csv_fp=r'api/assets/releases.csv',
waste_transfers_csv_fp=r'api/assets/waste_transfers.csv'
)
| 2.1875 | 2 |
newslytics/urls.py | rangertaha/newslytics | 0 | 12759470 | <gh_stars>0
"""crawlytics URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^domains/', include('apps.domains.urls'), name='domains'),
url(r'^news/', include('apps.news.urls'), name='news'),
url(r'^objects/', include('apps.objects.urls'), name='objects'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 2.453125 | 2 |
secretcrypt/tests/test_secret.py | Zemanta/secretcrypt | 49 | 12759471 | import unittest
import mock
import secretcrypt
from secretcrypt import StrictSecret, Secret
class TestSecret(unittest.TestCase):
    """Tests for lazy (StrictSecret) and eager (Secret) decryption."""
    @mock.patch('importlib.import_module')
    def test_decrypt(self, mock_import_module):
        """StrictSecret parses the scheme string and decrypts on demand."""
        # Stand-in for a crypter plugin; __name__ must look like a real
        # submodule of secretcrypt.
        mock_crypter_module = mock.MagicMock()
        mock_crypter_module.__name__ = 'secretcrypt.mock_crypter'
        def mock_import_side_effect(*args, **kwargs):
            # The crypter must be imported relative to the secretcrypt package.
            self.assertEqual(kwargs['package'], secretcrypt.__name__)
            if args[0] == '.mock_crypter':
                return mock_crypter_module
            raise Exception('Importing wrong module')
        mock_import_module.side_effect = mock_import_side_effect
        # Format is "<crypter>:<querystring params>:<ciphertext>".
        secret = StrictSecret('mock_crypter:key=value&key2=value2:myciphertext')
        self.assertEqual(secret._decrypt_params, dict(key='value', key2='value2'))
        self.assertEqual(secret._ciphertext, b'myciphertext')
        # Decrypting twice must keep delegating with the same arguments.
        secret.decrypt()
        secret.decrypt()
        mock_crypter_module.decrypt.assert_called_with(
            b'myciphertext',
            key='value',
            key2='value2',
        )
    def test_decrypt_plain(self):
        """The 'plain' scheme returns the ciphertext bytes unchanged."""
        secret = StrictSecret('plain::mypass')
        self.assertEqual(b'mypass', secret.decrypt())
    @mock.patch('importlib.import_module')
    def test_eager_decrypt(self, mock_import_module):
        """Secret decrypts once at construction time, then serves a copy."""
        mock_crypter_module = mock.MagicMock()
        mock_crypter_module.decrypt.side_effect = lambda *args, **kwargs: b'plaintext'
        mock_crypter_module.__name__ = 'secretcrypt.mock_crypter'
        def mock_import_side_effect(*args, **kwargs):
            self.assertEqual(kwargs['package'], secretcrypt.__name__)
            if args[0] == '.mock_crypter':
                return mock_crypter_module
            raise Exception('Importing wrong module')
        mock_import_module.side_effect = mock_import_side_effect
        # Construction alone must trigger the decryption...
        secret = Secret('mock_crypter:key=value&key2=value2:myciphertext')
        mock_crypter_module.decrypt.assert_called_with(
            b'myciphertext',
            key='value',
            key2='value2',
        )
        mock_crypter_module.reset_mock()
        # ...and get() must not decrypt again.
        plaintext = secret.get()
        self.assertEqual(b'plaintext', plaintext)
        mock_crypter_module.assert_not_called()
| 2.625 | 3 |
tests/test_get_media.py | waider/gopro-py-api | 1 | 12759472 | from .conftest import GoProCameraTest
from goprocam import GoProCamera
from socket import timeout
class GetMediaTest(GoProCameraTest):
    """Tests for GoPro.getMedia() URL construction across camera models."""
    def test_get_media_FS(self):
        """Fusion ('FS') cameras delegate to getMediaFusion()."""
        with self.monkeypatch.context() as m:
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'FS')
            m.setattr(GoProCamera.GoPro, 'getMediaFusion', lambda s: 'MF')
            assert self.goprocam.getMedia() == 'MF'
    def test_get_media_empty(self):
        """An empty media list yields a URL with no folder/file components."""
        with self.monkeypatch.context() as m:
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            # tut tut. this should raise an exception or return None
            assert self.goprocam.getMedia() ==\
                'http://10.5.5.9/videos/DCIM//'
    def test_get_media_empty_folder(self):
        """A folder with no files yields the folder URL with no file name."""
        with self.monkeypatch.context() as m:
            # Canned eAPI-style media listing served by the test fixture.
            self.responses['/gp/gpMediaList'] = {
                'media': [
                    {
                        'd': 'folder',
                        'fs': [
                        ]
                    }
                ]
            }
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() ==\
                'http://10.5.5.9/videos/DCIM/folder/'
    def test_get_media(self):
        """The last folder/file pair of the listing forms the media URL."""
        with self.monkeypatch.context() as m:
            self.responses['/gp/gpMediaList'] = {
                'media': [
                    {
                        'd': 'folder',
                        'fs': [
                            {'n': 'file'}
                        ]
                    }
                ]
            }
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() ==\
                'http://10.5.5.9/videos/DCIM/folder/file'
    def test_get_media_timeout(self):
        """A socket timeout while listing media degrades to an empty string."""
        with self.monkeypatch.context() as m:
            self.responses['/gp/gpMediaList'] = timeout()
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() == ''
    def test_get_media_httperror(self):
        """An HTTP error (no canned response) also degrades to ''."""
        with self.monkeypatch.context() as m:
            del(self.responses['/gp/gpMediaList'])
            m.setattr(GoProCamera.GoPro, 'infoCamera', lambda s, x: 'HD')
            assert self.goprocam.getMedia() == ''
| 2.328125 | 2 |
src/route.py | mattianeroni/milk-logistics | 0 | 12759473 | <reponame>mattianeroni/milk-logistics<filename>src/route.py<gh_stars>0
"""
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This file is part of the collaboration between University of Modena and
University of Campania "<NAME>". It is authored by <NAME> and <NAME>.
The scope is the implementation and validation of several algorithms to optimise the
collection of milk and its delivery to the production plant or cheese factory.
The problem is new in the literature, and can be partially associated with the multi-source
vehicle routing problem, the only difference being that the starting and ending depots
are different, as in the multi-source team orienteering problem.
For a better description of the problem, please refer to the scientific publication.
Contact: <EMAIL>
Date: January 2022
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
import collections
class Route:
    """A single vehicle tour from a milk source to a delivery depot.

    Besides the two endpoints and the assigned vehicle, a route tracks
    the ordered sequence of visited nodes, the total delivered quantity,
    and the accumulated travel cost (i.e. the route length).
    """
    def __init__(self, source, depot, vehicle):
        """Build an empty route.

        :param source: Starting node of the route.
        :param depot: Final node (delivery depot) of the route.
        :param vehicle: Vehicle that will run this route.
        """
        # Endpoints and the vehicle assigned to the tour.
        self.source = source
        self.depot = depot
        self.vehicle = vehicle
        # Visited nodes in travel order; deque allows cheap growth at
        # both ends while the route is being constructed.
        self.nodes = collections.deque()
        # Accumulators: delivered quantity and route length.
        self.qty = 0
        self.cost = 0
| 2.71875 | 3 |
Chapter04/deque_avg.py | PacktPublishing/Secret-Recipes-of-the-Python-Ninja | 13 | 12759474 | from collections import deque
import itertools
def moving_average(iterable, n=3):
    """Yield successive length-*n* window averages over *iterable*.

    Implements the classic sliding-window recipe: keep the current
    window in a deque and update the running sum incrementally, so each
    step costs O(1) instead of re-summing the window.

    >>> list(moving_average([40, 30, 50, 46, 39, 44]))
    [40.0, 42.0, 45.0, 43.0]
    """
    it = iter(iterable)
    # Seed the window with the first n-1 values, padded with a leading 0
    # that is evicted on the first full window.
    window = deque(itertools.islice(it, n - 1))
    window.appendleft(0)
    running_total = sum(window)
    for value in it:
        running_total += value - window.popleft()
        window.append(value)
        yield running_total / n
# Demo: pull the first three window averages from the generator
# (40.0, 42.0, 45.0 for the sample series above).
ma = moving_average([40, 30, 50, 46, 39, 44])
next(ma)
next(ma)
next(ma)
| 4.3125 | 4 |
flash_examples/integrations/fiftyone/image_classification.py | dmarx/lightning-flash | 0 | 12759475 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
import torch
import flash
from flash.core.classification import FiftyOneLabels, Labels
from flash.core.data.utils import download_data
from flash.core.integrations.fiftyone import visualize
from flash.image import ImageClassificationData, ImageClassifier
# 1 Download data (the Hymenoptera ants/bees image folders).
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip")
# 2 Load data: one folder per split, one sub-folder per class label.
datamodule = ImageClassificationData.from_folders(
    train_folder="data/hymenoptera_data/train/",
    val_folder="data/hymenoptera_data/val/",
    test_folder="data/hymenoptera_data/test/",
    predict_folder="data/hymenoptera_data/predict/",
)
# 3 Fine tune a model (limited to a single batch/epoch for demo speed).
model = ImageClassifier(
    backbone="resnet18",
    num_classes=datamodule.num_classes,
    output=Labels(),
)
trainer = flash.Trainer(
    max_epochs=1,
    gpus=torch.cuda.device_count(),
    limit_train_batches=1,
    limit_val_batches=1,
)
# "freeze_unfreeze": backbone stays frozen for 1 epoch, then trains end to end.
trainer.finetune(
    model,
    datamodule=datamodule,
    strategy=("freeze_unfreeze", 1),
)
trainer.save_checkpoint("image_classification_model.pt")
# 4 Predict from checkpoint (pre-trained weights hosted by Lightning).
model = ImageClassifier.load_from_checkpoint(
    "https://flash-weights.s3.amazonaws.com/0.6.0/image_classification_model.pt"
)
model.output = FiftyOneLabels(return_filepath=True)  # output FiftyOne format
predictions = trainer.predict(model, datamodule=datamodule)
predictions = list(chain.from_iterable(predictions))  # flatten batches
# 5 Visualize predictions in FiftyOne App
# Optional: pass `wait=True` to block execution until App is closed
session = visualize(predictions)
| 2.078125 | 2 |
Chapter03/Arista/eapi_1.py | stavsta/Mastering-Python-Networking-Second-Edition | 107 | 12759476 | <filename>Chapter03/Arista/eapi_1.py
#!/usr/bin/python2
from __future__ import print_function
from jsonrpclib import Server
import ssl
# Globally disable TLS certificate verification so the switch's self-signed
# certificate is accepted.  NOTE(review): this weakens security for every
# HTTPS connection in the process — confirm this is acceptable.
ssl._create_default_https_context = ssl._create_unverified_context
# eAPI JSON-RPC endpoint with credentials embedded in the URL.
# NOTE(review): hard-coded username/password in source — consider moving
# these to configuration or environment variables.
switch = Server("https://admin:arista@192.168.199.158/command-api")
# runCmds(api_version, commands) returns one result dict per command.
response = switch.runCmds( 1, [ "show version" ] )
print('Serial Number: ' + response[0]['serialNumber'])
| 2.03125 | 2 |
demofx/01_job/utils.py | mrm-xiefan/lunania-ai | 17 | 12759477 | <filename>demofx/01_job/utils.py
import os
from os.path import join as join_path
import config
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from luna import LunaExcepion
logger = logging.getLogger()
def preprocess_images(images):
    """Convert an H x W x RGB float array into zero-centered BGR.

    The channel axis is flipped RGB -> BGR and the ImageNet mean pixel
    (given in BGR order) is subtracted, matching VGG-style preprocessing.
    NOTE(review): the flip produces a view, so the subtraction also
    mutates the caller's array in place — confirm callers expect that.
    """
    # A negative-step slice of the last axis yields a BGR view of the
    # same underlying buffer.
    bgr = images[:, :, ::-1]
    # Zero-center each channel by the ImageNet mean pixel (BGR order).
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        bgr[:, :, channel] -= mean
    return bgr
def save_history(history, save_path):
    """Write one TSV row per training epoch to <save_path>/history.txt.

    Columns: epoch, loss, acc, val_loss, val_acc (header line included).
    *history* is expected to be a Keras-style History object whose
    ``.history`` dict holds equal-length metric lists.
    """
    metrics = history.history
    loss, acc = metrics['loss'], metrics['acc']
    val_loss, val_acc = metrics['val_loss'], metrics['val_acc']
    out_path = join_path(save_path, 'history.txt')
    with open(out_path, "w") as fp:
        fp.write("epoch\tloss\tacc\tval_loss\tval_acc\n")
        # The epoch count follows the training-accuracy series.
        for epoch in range(len(acc)):
            fp.write("%d\t%f\t%f\t%f\t%f\n"
                     % (epoch, loss[epoch], acc[epoch],
                        val_loss[epoch], val_acc[epoch]))
def plot_history(history, save_path):
    """Render training curves to <save_path>/acc.png and <save_path>/loss.png.

    :param history: Keras-style History whose ``.history`` dict holds the
        'acc', 'val_acc', 'loss' and 'val_loss' series.
    :param save_path: Directory the two PNG files are written into.
    """
    # Plot the accuracy history on its own figure.
    # BUGFIX: previously both charts were drawn onto the same implicit
    # pyplot figure, so loss.png also contained the accuracy curves and
    # their legend entries; plt.figure() isolates each chart and
    # plt.close() releases it after saving.
    plt.figure()
    plt.plot(history.history['acc'], "o-", label="accuracy")
    plt.plot(history.history['val_acc'], "o-", label="val_acc")
    plt.title('model accuracy')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend(loc="lower right")
    acc_file = join_path(save_path, 'acc.png')
    plt.savefig(acc_file)
    plt.close()
    # Plot the loss history on a fresh figure.
    plt.figure()
    plt.plot(history.history['loss'], "o-", label="loss")
    plt.plot(history.history['val_loss'], "o-", label="val_loss")
    plt.title('model loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc='lower right')
    loss_file = join_path(save_path, 'loss.png')
    plt.savefig(loss_file)
    plt.close()
def lock():
    """Create the PID lock file, refusing to run twice concurrently.

    :raises LunaExcepion: with ``config.locked`` when the lock file
        already exists (another job is presumed to be running).
    """
    if os.path.exists(config.lock_file):
        raise LunaExcepion(config.locked)
    # Record our PID so an operator can identify the running job.
    with open(config.lock_file, 'w') as lock_handle:
        lock_handle.write(str(os.getpid()))
def unlock():
    """Delete the PID lock file when present; otherwise do nothing."""
    lock_path = config.lock_file
    if os.path.exists(lock_path):
        os.remove(lock_path)
def error(code):
    """Log *code* as an error and echo it to stdout as a small dict payload."""
    logger.error(code)
    print({'error': code})
| 2.328125 | 2 |
swagger_client/apis/addon_api.py | scubawhere/scubawhere-api-python-client | 0 | 12759478 | # coding: utf-8
"""
Scubawhere API Documentation
This is the documentation for scubawhere's RMS API. This API is only to be used by authorized parties with valid auth tokens. [Learn about scubawhere](http://www.scubawhere.com) to become an authorized consumer of our API
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class AddonApi(object):
    """Client for the /addon endpoints of the Scubawhere RMS API.

    NOTE: This class was originally auto generated by the swagger code
    generator program (https://github.com/swagger-api/swagger-codegen).
    The per-endpoint boilerplate has been consolidated into the private
    ``_request`` helper; every public method keeps the signature, the
    validation errors, and the HTTP behaviour of the generated code.

    Each endpoint has two entry points:
    * ``<name>(...)`` — convenience wrapper returning only the response
      data (it forces ``_return_http_data_only=True``);
    * ``<name>_with_http_info(...)`` — returns the full response triple
      unless the caller overrides the flags.
    Passing a ``callback`` keyword makes the request asynchronous; the
    request thread is returned instead of the response.
    """

    def __init__(self, api_client=None):
        """Use *api_client* when given, otherwise fall back to the shared
        client from the global Configuration (created lazily)."""
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    # ------------------------------------------------------------------
    # shared plumbing
    # ------------------------------------------------------------------
    def _request(self, method_name, resource_path, http_method, response_type,
                 positional, required, allowed, kwargs):
        """Validate call parameters and dispatch one HTTP request.

        :param str method_name: Public method name, used in error messages.
        :param str resource_path: Endpoint path (``{format}`` is expanded
            to ``json`` for parity with the generated code).
        :param str http_method: HTTP verb, e.g. ``'POST'``.
        :param str response_type: Swagger model the response deserializes to.
        :param dict positional: Parameters received positionally by the
            public method; always forwarded as query parameters.
        :param list required: Parameter names that must not be ``None``.
        :param list allowed: Every accepted query-parameter name.
        :param dict kwargs: Remaining keyword arguments from the caller;
            ``callback`` and ``_return_http_data_only`` are control flags
            forwarded to the ApiClient, everything else must appear in
            *allowed*.
        :raises TypeError: On an unexpected keyword argument.
        :raises ValueError: When a required parameter is missing or None.
        """
        query_params = dict(positional)
        # dict.items() iterates identically on py2/py3; the generated code
        # used six.iteritems for the same purpose.
        for key, val in kwargs.items():
            if key in ('callback', '_return_http_data_only'):
                continue
            if key not in allowed:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            query_params[key] = val
        for name in required:
            if query_params.get(name) is None:
                raise ValueError(
                    "Missing the required parameter `%s` when calling `%s`"
                    % (name, method_name))

        header_params = {}
        # HTTP header `Accept` (omitted entirely when nothing is selected,
        # matching the generated code).
        accept = self.api_client.select_header_accept(['application/json'])
        if accept:
            header_params['Accept'] = accept
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        return self.api_client.call_api(
            resource_path.replace('{format}', 'json'), http_method,
            {},                # path params: none of the /addon endpoints use any
            query_params,
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=[],  # no authentication configured for these endpoints
            callback=kwargs.get('callback'),
            _return_http_data_only=kwargs.get('_return_http_data_only'))

    # ------------------------------------------------------------------
    # endpoints
    # ------------------------------------------------------------------
    def add_addon(self, name, base_prices, **kwargs):
        """Create a new addon.

        :param str name: Name of the type of addon (required)
        :param int base_prices: Prices for addon (required)
        :param str description: Description of the addon
        :return: InlineResponse2002
        """
        kwargs['_return_http_data_only'] = True
        return self.add_addon_with_http_info(name, base_prices, **kwargs)

    def add_addon_with_http_info(self, name, base_prices, **kwargs):
        """Create a new addon (full HTTP info variant).

        Same parameters as :meth:`add_addon`.
        :return: InlineResponse2002
        """
        return self._request(
            'add_addon', '/addon/add', 'POST', 'InlineResponse2002',
            positional={'name': name, 'base_prices': base_prices},
            required=['name', 'base_prices'],
            allowed=['name', 'base_prices', 'description'],
            kwargs=kwargs)

    def delete_addon(self, **kwargs):
        """Delete an addon by ID.

        :param int body: ID of the Addon
        :return: InlineResponse2003
        """
        kwargs['_return_http_data_only'] = True
        return self.delete_addon_with_http_info(**kwargs)

    def delete_addon_with_http_info(self, **kwargs):
        """Delete an addon by ID (full HTTP info variant).

        Same parameters as :meth:`delete_addon`.
        :return: InlineResponse2003
        """
        return self._request(
            'delete_addon', '/addon/delete', 'DELETE', 'InlineResponse2003',
            positional={},
            required=[],
            allowed=['body'],
            kwargs=kwargs)

    def get_addon(self, id, **kwargs):
        """Retrieve an addon by ID.

        :param int id: ID of the addon to be retrieved (required)
        :return: InlineResponse2001
        """
        kwargs['_return_http_data_only'] = True
        return self.get_addon_with_http_info(id, **kwargs)

    def get_addon_with_http_info(self, id, **kwargs):
        """Retrieve an addon by ID (full HTTP info variant).

        Same parameters as :meth:`get_addon`.
        :return: InlineResponse2001
        """
        return self._request(
            'get_addon', '/addon', 'GET', 'InlineResponse2001',
            positional={'id': id},
            required=['id'],
            allowed=['id'],
            kwargs=kwargs)

    def get_all_addons(self, **kwargs):
        """Retrieve all addons.

        :return: list[Addon]
        """
        kwargs['_return_http_data_only'] = True
        return self.get_all_addons_with_http_info(**kwargs)

    def get_all_addons_with_http_info(self, **kwargs):
        """Retrieve all addons (full HTTP info variant).

        :return: list[Addon]
        """
        return self._request(
            'get_all_addons', '/addon/all', 'GET', 'list[Addon]',
            positional={},
            required=[],
            allowed=[],
            kwargs=kwargs)

    def get_all_with_trashed_addons(self, **kwargs):
        """Retrieve all addons including any deleted models.

        :return: list[Addon]
        """
        kwargs['_return_http_data_only'] = True
        return self.get_all_with_trashed_addons_with_http_info(**kwargs)

    def get_all_with_trashed_addons_with_http_info(self, **kwargs):
        """Retrieve all addons including deleted models (full HTTP info).

        :return: list[Addon]
        """
        return self._request(
            'get_all_with_trashed_addons', '/addon/all-with-trashed', 'GET',
            'list[Addon]',
            positional={},
            required=[],
            allowed=[],
            kwargs=kwargs)

    def update_addon(self, id, **kwargs):
        """Update an Addon by id using the specified fields.

        :param int id: ID of the Addon to be updated (required)
        :param str name: Name of the Addon
        :param str description: Description of the Addon
        :return: InlineResponse2002
        """
        kwargs['_return_http_data_only'] = True
        return self.update_addon_with_http_info(id, **kwargs)

    def update_addon_with_http_info(self, id, **kwargs):
        """Update an Addon (full HTTP info variant).

        Same parameters as :meth:`update_addon`.
        :return: InlineResponse2002
        """
        return self._request(
            'update_addon', '/addon/edit', 'PUT', 'InlineResponse2002',
            positional={'id': id},
            required=['id'],
            allowed=['id', 'name', 'description'],
            kwargs=kwargs)
| 2 | 2 |
setup.py | cthoyt/tinydb-git | 10 | 12759479 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file.

    Used to embed README.rst as the package's long description.
    """
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection (the original never closed it).
    with open(os.path.join(os.path.dirname(__file__), fname)) as fp:
        return fp.read()
setup(
    name='tinydb-git',
    version='0.2.dev1',
    description='A git-based storage backend for tinydb.',
    # README.rst doubles as the PyPI long description.
    long_description=read('README.rst'),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/mbr/tinydb-git',
    license='MIT',
    # Ship every package except the test suite.
    packages=find_packages(exclude=['tests']),
    # dulwich provides the pure-python git layer the backend stores into.
    install_requires=['dulwich', 'tinydb'],
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ]
)
| 1.523438 | 2 |
tests/test_api/mixins/response_and_client/protocol.py | maxzhenzhera/my_vocab_backend | 0 | 12759480 | <filename>tests/test_api/mixins/response_and_client/protocol.py
from typing import (
Any,
Protocol
)
import pytest
__all__ = ['HasResponseAndClientOnSuccessFixture']
class HasResponseAndClientOnSuccessFixture(Protocol):
    """Structural (Protocol) type for test mixins that must provide the
    ``response_and_client_on_success`` pytest fixture."""
    @pytest.fixture(name='response_and_client_on_success')
    async def fixture_response_and_client_on_success(self, *args: Any):
        # Concrete mixins override this to produce the (response, client)
        # pair of a successful request; the Protocol only declares shape.
        raise NotImplementedError
| 1.9375 | 2 |
tinyms/vision/_transform_ops.py | yexijoe/tinyms | 0 | 12759481 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.dataset.vision.py_transforms import Grayscale, RandomHorizontalFlip as PILRandomHorizontalFlip
from mindspore.dataset.vision.c_transforms import *
from mindspore.dataset.transforms.c_transforms import *
# Vision transform names re-exported from mindspore's c_transforms
# (plus Grayscale and PILRandomHorizontalFlip from py_transforms above).
vision_trans = [
    'AutoContrast',
    'BoundingBoxAugment',
    'CenterCrop',
    'CutMixBatch',
    'CutOut',
    'Decode',
    'Equalize',
    'Grayscale',
    'HWC2CHW',
    'Invert',
    'MixUpBatch',
    'Normalize',
    'Pad',
    'PILRandomHorizontalFlip',
    'RandomAffine',
    'RandomColor',
    'RandomColorAdjust',
    'RandomCrop',
    'RandomCropDecodeResize',
    'RandomCropWithBBox',
    'RandomHorizontalFlip',
    'RandomHorizontalFlipWithBBox',
    'RandomPosterize',
    'RandomResize',
    'RandomResizedCrop',
    'RandomResizedCropWithBBox',
    'RandomResizeWithBBox',
    'RandomRotation',
    'RandomSelectSubpolicy',
    'RandomSharpness',
    'RandomSolarize',
    'RandomVerticalFlip',
    'RandomVerticalFlipWithBBox',
    'Rescale',
    'Resize',
    'ResizeWithBBox',
    'SoftDvppDecodeRandomCropResizeJpeg',
    'SoftDvppDecodeResizeJpeg',
    'UniformAugment',
]
# General (non-vision) transform names from transforms.c_transforms.
common_trans = [
    'Compose',
    'Concatenate',
    'Duplicate',
    'Fill',
    'Mask',
    'OneHot',
    'PadEnd',
    'RandomApply',
    'RandomChoice',
    'Slice',
    'TypeCast',
    'Unique',
]
__all__ = vision_trans + common_trans
# Ready-to-use singleton instances for the two most common operations.
decode = Decode()
hwc2chw = HWC2CHW()
__all__.extend([
    'decode',
    'hwc2chw',
])
| 1.5 | 2 |
applications/PfemFluidDynamicsApplication/python_scripts/python_solvers_wrapper_pfem_fluid.py | ma6yu/Kratos | 0 | 12759482 | <filename>applications/PfemFluidDynamicsApplication/python_scripts/python_solvers_wrapper_pfem_fluid.py
# Making KratosMultiphysics backward compatible with python 2.6 and 2.7
from __future__ import print_function, absolute_import, division
# Importing Kratos
import KratosMultiphysics
# Other imports
from importlib import import_module
def CreateSolverByParameters(model, solver_settings, parallelism):
    """Build the PFEM fluid solver requested by ``solver_settings``.

    Parameters
    ----------
    model : KratosMultiphysics.Model
        Model the solver will operate on.
    solver_settings : KratosMultiphysics.Parameters
        Settings block containing at least a ``solver_type`` string entry.
    parallelism : str
        Parallel type string (not used by the dispatch itself, kept for
        interface compatibility with the other solver wrappers).

    Raises
    ------
    Exception
        If ``solver_type`` does not name one of the known solvers.
    """
    solver_type = solver_settings["solver_type"].GetString()

    # Map every accepted solver_type alias onto its python module name.
    if solver_type == "pfem_fluid_solver" or solver_type == "PfemFluid":
        solver_module_name = "pfem_fluid_solver"

    elif solver_type == "pfem_dem_solver" or solver_type == "PfemDem":
        solver_module_name = "pfem_dem_solver"

    elif solver_type == "pfem_fluid_nodal_integration_solver" or solver_type == "PfemFluidNodalIntegration":
        solver_module_name = "pfem_fluid_nodal_integration_solver"

    elif solver_type == "pfem_fluid_thermally_coupled_solver" or solver_type == "PfemFluidThermallyCoupled":
        solver_module_name = "pfem_fluid_thermally_coupled_solver"

    else:
        err_msg = "The requested solver type \"" + solver_type + "\" is not in the python solvers wrapper.\n"
        # Fix: "pfem_dem_solver" is accepted above but was missing from the
        # list of available options reported to the user.
        err_msg += "Available options are: \"pfem_fluid_solver\", \"pfem_dem_solver\",\n"
        err_msg += "\"pfem_fluid_nodal_integration_solver\", \"pfem_fluid_thermally_coupled_solver\""
        raise Exception(err_msg)

    module_full = 'KratosMultiphysics.PfemFluidDynamicsApplication.' + solver_module_name
    solver = import_module(module_full).CreateSolver(model, solver_settings)

    return solver
def CreateSolver(model, custom_settings):
    """Validate the inputs and build the PFEM fluid solver they describe."""
    if not isinstance(model, KratosMultiphysics.Model):
        raise Exception("input is expected to be provided as a Kratos Model object")

    if not isinstance(custom_settings, KratosMultiphysics.Parameters):
        raise Exception("input is expected to be provided as a Kratos Parameters object")

    settings = custom_settings["solver_settings"]
    parallel_type = custom_settings["problem_data"]["parallel_type"].GetString()

    return CreateSolverByParameters(model, settings, parallel_type)
| 2.0625 | 2 |
src/runner.py | RichardDastardly/msfs2020-google-map | 0 | 12759483 | import dns
import subprocess
import dns.resolver
import traceback
import urllib3
urllib3.disable_warnings()
# Domains whose traffic must be redirected to the local map proxy.
__domains = ['kh.ssl.ak.tiles.virtualearth.net', 'khstorelive.azureedge.net']

# Fallback addresses used when live DNS resolution is unavailable.
__default_ip = {
    'kh.ssl.ak.tiles.virtualearth.net': '172.16.31.10',
    'khstorelive.azureedge.net': '192.168.127.12',
}

# Windows hosts file and the redirect lines appended to / removed from it.
host_path = "C:\\Windows\\System32\\drivers\\etc\\hosts"
host_entries = ["\n127.0.0.1 {}\n".format(domain) for domain in __domains]
def add_cert():
    """Install the bundled root certificate into the Windows trust store."""
    command = ["certutil", "-addstore", "-f", "root", ".\\certs\\cert.crt"]
    subprocess.run(command, shell=True, check=True)
def get_hosts_origin_ips():
    """Resolve the real IPs of the proxied domains, falling back to defaults.

    Returns a dict mapping each domain in ``__domains`` to its resolved IP
    string.  Any resolver failure falls back to the hard-coded
    ``__default_ip`` mapping so callers always get a usable result.
    """
    try:
        origin_ips = {}
        dns_resolver = dns.resolver.Resolver()
        for d in __domains:
            origin_ips[d] = dns_resolver.resolve(d)[0].to_text()
        print(origin_ips)
        return origin_ips
    except dns.exception.DNSException:
        # Previously only dns.exception.Timeout was caught, so other resolver
        # errors (NXDOMAIN, NoAnswer, no nameservers, ...) crashed instead of
        # falling back.  DNSException is the common base of all of them.
        traceback.print_exc()
        return __default_ip
def override_hosts():
    """Append the redirect entries for the proxied domains to the hosts file."""
    print("Overriding hosts")
    with open(host_path, "a") as hosts_file:
        for entry in host_entries:
            hosts_file.write(entry)
def restore_hosts():
    """Strip the previously appended redirect entries from the hosts file."""
    print("Restoring hosts")
    with open(host_path, "r+") as hosts_file:
        contents = hosts_file.read()
        for entry in host_entries:
            contents = contents.replace(entry, "")
        # Rewrite the cleaned contents in place, discarding the leftover tail.
        hosts_file.seek(0)
        hosts_file.write(contents)
        hosts_file.truncate()
| 2.5 | 2 |
bridgedb/test/test_captcha.py | jugheadjones10/bridgedb | 0 | 12759484 | <filename>bridgedb/test/test_captcha.py
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: <NAME> 0xA3ADB67A2CDB8B35 <<EMAIL>>
# :copyright: (c) 2013-2017, Isis Lovecruft
# (c) 2007-2017, The Tor Project, Inc.
# :license: 3-Clause BSD, see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.captcha` module."""
import os
import shutil
import time
from base64 import urlsafe_b64decode
from twisted.trial import unittest
from zope.interface import implementedBy
from zope.interface import providedBy
from bridgedb import captcha
from bridgedb import crypto
class CaptchaTests(unittest.TestCase):
    """Tests for :class:`bridgedb.captcha.Captcha`."""

    def test_implementation(self):
        """The Captcha class should implement the ICaptcha interface."""
        self.assertTrue(captcha.ICaptcha.implementedBy(captcha.Captcha))

    def test_provider(self):
        """Instances of Captcha should provide ICaptcha."""
        instance = captcha.Captcha()
        self.assertTrue(captcha.ICaptcha.providedBy(instance))

    def test_get(self):
        """The base Captcha.get() should return None."""
        instance = captcha.Captcha()
        self.assertIsNone(instance.get())
class ReCaptchaTests(unittest.TestCase):
    """Tests for :class:`bridgedb.captcha.ReCaptcha`."""

    def setUp(self):
        self.c = captcha.ReCaptcha('publik', 'sekrit')

    def test_init(self):
        """Check the ReCaptcha class stored the private and public keys."""
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(self.c.secretKey, 'sekrit')
        self.assertEqual(self.c.publicKey, 'publik')

    def test_get(self):
        """Test get() method."""
        # Force urllib.request to do anything less idiotic than the defaults:
        envkey = 'HTTPS_PROXY'
        oldkey = None
        if envkey in os.environ:
            oldkey = os.environ[envkey]
        os.environ[envkey] = '127.0.0.1:9150'

        # This stupid thing searches the environment for ``<protocol>_PROXY``
        # variables, hence the above 'HTTPS_PROXY' env setting:
        proxy = captcha.urllib.request.ProxyHandler()
        opener = captcha.urllib.request.build_opener(proxy)
        captcha.urllib.request.install_opener(opener)

        try:
            # There isn't really a reliable way to test this function! :(
            self.c.get()
        except Exception as error:
            reason = "ReCaptcha.get() test requires an active network "
            reason += "connection.\nThis test failed with: %s" % error
            raise unittest.SkipTest(reason)
        else:
            self.assertIsInstance(self.c.image, bytes)
            self.assertIsInstance(self.c.challenge, str)
        finally:
            # Replace the original environment variable if there was one:
            if oldkey:
                os.environ[envkey] = oldkey
            else:
                os.environ.pop(envkey)

    def test_get_noKeys(self):
        """ReCaptcha.get() without API keys should fail."""
        c = captcha.ReCaptcha()
        self.assertRaises(captcha.CaptchaKeyError, c.get)
class GimpCaptchaTests(unittest.TestCase):
    """Tests for :class:`bridgedb.captcha.GimpCaptcha`."""

    def setUp(self):
        here = os.getcwd()
        self.topDir = here.rstrip('_trial_temp')
        self.cacheDir = os.path.join(self.topDir, 'captchas')
        self.badCacheDir = os.path.join(here, 'capt')

        # Get keys for testing or create them:
        self.sekrit, self.publik = crypto.getRSAKey('test_gimpCaptcha_RSAkey')
        self.hmacKey = crypto.getKey('test_gimpCaptcha_HMACkey')

    def test_init_noSecretKey(self):
        """Calling GimpCaptcha.__init__() without a secret key parameter should raise
        a CaptchaKeyError.
        """
        self.assertRaises(captcha.CaptchaKeyError, captcha.GimpCaptcha,
                          self.publik, None, self.hmacKey, self.cacheDir)

    def test_init_noPublicKey(self):
        """__init__() without publicKey should raise a CaptchaKeyError."""
        self.assertRaises(captcha.CaptchaKeyError, captcha.GimpCaptcha,
                          None, self.sekrit, self.hmacKey, self.cacheDir)

    def test_init_noHMACKey(self):
        """__init__() without hmacKey should raise a CaptchaKeyError."""
        self.assertRaises(captcha.CaptchaKeyError, captcha.GimpCaptcha,
                          self.publik, self.sekrit, None, self.cacheDir)

    def test_init_noCacheDir(self):
        """__init__() without cacheDir should raise a CaptchaKeyError."""
        self.assertRaises(captcha.GimpCaptchaError, captcha.GimpCaptcha,
                          self.publik, self.sekrit, self.hmacKey, None)

    def test_init_badCacheDir(self):
        """GimpCaptcha with bad cacheDir should raise GimpCaptchaError."""
        self.assertRaises(captcha.GimpCaptchaError, captcha.GimpCaptcha,
                          self.publik, self.sekrit, self.hmacKey,
                          self.cacheDir.rstrip('chas'))

    def test_init(self):
        """Test that __init__ correctly initialised all the values."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        self.assertIsNone(c.answer)
        self.assertIsNone(c.image)
        self.assertIsNone(c.challenge)

    def test_createChallenge(self):
        """createChallenge() should return the encrypted CAPTCHA answer."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        challenge = c.createChallenge('w00t')
        self.assertIsInstance(challenge, str)

    def test_createChallenge_base64(self):
        """createChallenge() return value should be urlsafe base64-encoded."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        challenge = c.createChallenge('w00t')
        decoded = urlsafe_b64decode(challenge)
        self.assertTrue(decoded)

    def test_createChallenge_hmacValid(self):
        """The HMAC in createChallenge() return value should be valid."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        challenge = c.createChallenge('ShouldHaveAValidHMAC')
        decoded = urlsafe_b64decode(challenge)
        hmac = decoded[:20]
        orig = decoded[20:]
        correctHMAC = crypto.getHMAC(self.hmacKey, orig)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(hmac, correctHMAC)

    def test_createChallenge_decryptedAnswerMatches(self):
        """The challenge should decrypt back to the original answer."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        challenge = c.createChallenge('ThisAnswerShouldDecryptToThis')
        decoded = urlsafe_b64decode(challenge)
        hmac = decoded[:20]
        orig = decoded[20:]
        correctHMAC = crypto.getHMAC(self.hmacKey, orig)
        self.assertEqual(hmac, correctHMAC)

        decrypted = self.sekrit.decrypt(orig)
        timestamp = int(decrypted[:12].lstrip(b'0'))
        # The timestamp should be within 30 seconds of right now.
        self.assertApproximates(timestamp, int(time.time()), 30)
        self.assertEqual(b'ThisAnswerShouldDecryptToThis', decrypted[12:])

    def test_get(self):
        """GimpCaptcha.get() should return image and challenge strings."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        self.assertIsInstance(image, bytes)
        self.assertIsInstance(challenge, str)

    def test_get_emptyCacheDir(self):
        """An empty cacheDir should raise GimpCaptchaError."""
        os.makedirs(self.badCacheDir)
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.badCacheDir)
        self.assertRaises(captcha.GimpCaptchaError, c.get)
        shutil.rmtree(self.badCacheDir)

    def test_get_unreadableCaptchaFile(self):
        """An unreadable CAPTCHA file should raise GimpCaptchaError."""
        os.makedirs(self.badCacheDir)
        badFile = os.path.join(self.badCacheDir, 'uNr34dA81e.jpg')
        with open(badFile, 'w') as fh:
            fh.write(' ')
            fh.flush()
        os.chmod(badFile, 0o266)

        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.badCacheDir)
        # This should hit the second `except:` clause in get():
        self.assertRaises(captcha.GimpCaptchaError, c.get)
        shutil.rmtree(self.badCacheDir)

    def test_check(self):
        """A correct answer and valid challenge should return True."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        self.assertEqual(
            c.check(challenge, c.answer, c.secretKey, c.hmacKey),
            True)

    def test_check_blankAnswer(self):
        """A blank answer and valid challenge should return False."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        self.assertEqual(
            c.check(challenge, None, c.secretKey, c.hmacKey),
            False)

    def test_check_nonBase64(self):
        """Valid answer and challenge with invalid base64 returns False."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        challengeBadB64 = challenge.rstrip('==') + "\x42\x42\x42"
        self.assertEqual(
            c.check(challenge, c.answer, c.secretKey, c.hmacKey),
            True)
        self.assertEqual(
            c.check(challengeBadB64, c.answer, c.secretKey, c.hmacKey),
            False)

    def test_check_caseInsensitive_lowercase(self):
        """A correct answer in lowercase characters should return True."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        solution = c.answer.lower()
        self.assertEqual(
            c.check(challenge, solution, c.secretKey, c.hmacKey),
            True)

    def test_check_caseInsensitive_uppercase(self):
        """A correct answer in uppercase characters should return True."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        solution = c.answer.upper()
        self.assertEqual(
            c.check(challenge, solution, c.secretKey, c.hmacKey),
            True)

    def test_check_encoding_utf8(self):
        """A correct answer encoded as utf-8 bytes should return True."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        solution = c.answer.encode('utf8')
        self.assertEqual(
            c.check(challenge, solution, c.secretKey, c.hmacKey),
            True)

    def test_check_encoding_ascii(self):
        """A correct answer encoded as ascii bytes should return True."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        solution = c.answer.encode('ascii')
        self.assertEqual(
            c.check(challenge, solution, c.secretKey, c.hmacKey),
            True)

    def test_check_encoding_unicode(self):
        """A correct answer as a unicode string should return True."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        solution = c.answer if isinstance(c.answer, str) else c.answer.decode('utf-8')
        self.assertEqual(
            c.check(challenge, solution, c.secretKey, c.hmacKey),
            True)

    def test_check_missingHMACbytes(self):
        """A challenge that is missing part of the HMAC should return False."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        challengeBadHMAC = challenge[:10] + challenge[20:]
        self.assertEqual(
            c.check(challengeBadHMAC, c.answer, c.secretKey, c.hmacKey),
            False)

    def test_check_missingAnswerbytes(self):
        """Partial encrypted answers in challenges should return False."""
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        challengeBadOrig = challenge[:20] + challenge[30:]
        self.assertEqual(
            c.check(challengeBadOrig, c.answer, c.secretKey, c.hmacKey),
            False)

    def test_check_badHMACkey(self):
        """A challenge with a bad HMAC key should return False."""
        hmacKeyBad = crypto.getKey('test_gimpCaptcha_badHMACkey')
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        self.assertEqual(
            c.check(challenge, c.answer, c.secretKey, hmacKeyBad),
            False)

    def test_check_badRSAkey(self):
        """A challenge with a bad RSA secret key should return False."""
        secretKeyBad, publicKeyBad = crypto.getRSAKey('test_gimpCaptcha_badRSAkey')
        c = captcha.GimpCaptcha(self.publik, self.sekrit, self.hmacKey,
                                self.cacheDir)
        image, challenge = c.get()
        self.assertEqual(
            c.check(challenge, c.answer, secretKeyBad, c.hmacKey),
            False)
| 2.21875 | 2 |
python/plot_major_radius.py | nicolasialovega/fusion-world | 14 | 12759485 | import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import Normalize
import json
# Plot every tokamak/stellarator as a circle scaled by its major radius,
# laid out in a serpentine (boustrophedon) pattern from largest to smallest.
with open('../tokamaks.json') as f:
    tokamaks = json.load(f)

tokamaks_names = []
radii = []
fig, ax = plt.subplots()

# Collect only tokamaks/stellarators that have a major radius "R" recorded.
for tokamak in tokamaks:
    if tokamak["configuration"] in ["tokamak", "stellarator"]:
        if "R" in tokamak:
            tokamaks_names.append(tokamak["name"])
            radii.append(float(tokamak["R"]))

# Sort both lists together by radius, largest first.
tokamaks_names = [
    x for _, x in sorted(zip(radii, tokamaks_names), reverse=True)
]
radii = sorted(radii, reverse=True)

# Layout state machine: circles are placed left-to-right until max_x,
# then the row drops down and fills right-to-left back to min_x, etc.
pos_x = 0
pos_y = 0
max_x = 40
min_x = 1 + max(radii)
to_right = True           # current horizontal direction
left_or_right = 1         # +1 when moving right, -1 when moving left
radii_row = []            # radii placed in the current row (sets row height)
texts = []
switch = False            # True on the iteration where direction flips
for name, radius in zip(tokamaks_names, radii):
    if to_right and pos_x >= max_x:
        # Reached the right edge: start a new row going left.
        offset_y = -(max(radii_row)*1.5 + 2)
        pos_y += offset_y/2
        # pos_x = max_x
        to_right = False
        left_or_right = -1
        radii_row = []
        switch = True
    if not to_right and pos_x <= min_x:
        # Reached the left edge: start a new row going right.
        offset_y = -(max(radii_row)*1.5 + 2)
        pos_y += offset_y/2
        # pos_x = min_x
        to_right = True
        left_or_right = 1
        radii_row = []
        switch = True
    radii_row.append(radius)
    if not switch:
        pos_x += left_or_right*(1 + radius)
    circle = plt.Circle(
        (pos_x, pos_y), radius,
        color=cm.viridis(radius/max(radii)), fill=True)
    ax.add_patch(circle)
    # Label style depends on circle size: bold inside large circles,
    # small rotated labels above medium/small ones, none for radius 0.
    if radius > 1.7:
        text = plt.text(
            pos_x - 0.3*len(name), pos_y, name,
            weight="bold", fontsize=12+radius**4/len(name)**3)
        texts.append(text)
    elif radius > 0.3:
        text = plt.text(
            pos_x - 0.15*len(name), pos_y + radius, name,
            rotation=15)
        texts.append(text)
    elif radius > 0:
        text = plt.text(
            pos_x - 0.15*len(name), pos_y + radius, name,
            rotation=30)
        texts.append(text)
    if switch:
        # Finish dropping to the new row after the direction change.
        pos_y += offset_y/2
        switch = False
    else:
        pos_x += left_or_right*radius

# tweak text manually (hand-adjusted positions for the largest machines;
# indices depend on the sorted order of the input data)
texts[0].set_fontsize(28)
texts[0].set_position((5, -0.5))
texts[1].set_fontsize(15)
texts[1].set_position((16, -0.5))
texts[2].set_fontsize(16)
texts[2].set_position((29, -0.5))
# texts[5].set_fontsize(16)
# texts[5].set_position((29, -0.5))

ax.set_xlim((0, max_x + 7))
ax.set_ylim((pos_y*1.1, 10))
ax.set_aspect('equal', adjustable='box')
plt.axis('off')
plt.colorbar(
    cm.ScalarMappable(norm=Normalize(0, max(radii)), cmap=cm.viridis),
    label="Major radius (m)")
plt.tight_layout()
plt.show()
| 2.84375 | 3 |
elastica/timestepper/symplectic_steppers.py | yeonsu-jung/PyElastica | 0 | 12759486 | <gh_stars>0
__doc__ = """Symplectic time steppers and concepts for integrating the kinematic and dynamic equations of rod-like objects. """
import numpy as np
import math
# from elastica._elastica_numba._timestepper._symplectic_steppers import (
# SymplecticStepperTag,
# PositionVerlet,
# PEFRL,
# )
# from elastica.timestepper._stepper_interface import (
# _TimeStepper,
# _LinearExponentialIntegratorMixin,
# )
from elastica.rod.data_structures import (
overload_operator_kinematic_numba,
overload_operator_dynamic_numba,
)
"""
Developer Note
--------------
For the reasons why we define Mixin classes here, the developer
is referred to the same section on `explicit_steppers.py`.
"""
class _SystemInstanceStepper:
    # Driver that advances a single system (not a collection) one step.
    @staticmethod
    def do_step(
        TimeStepper, _steps_and_prefactors, System, time: np.float64, dt: np.float64
    ):
        """Advance ``System`` from ``time`` by one symplectic step of size ``dt``.

        ``_steps_and_prefactors`` is a time-symmetric sequence of
        (kinematic prefactor, kinematic step, dynamic step) triples; the last
        triple has a no-op dynamic slot (see ``SymplecticStepperMethods``),
        so it is peeled off and only its kinematic part is applied.
        Returns the updated simulation time.
        """
        for (kin_prefactor, kin_step, dyn_step) in _steps_and_prefactors[:-1]:
            kin_step(TimeStepper, System, time, dt)
            time += kin_prefactor(TimeStepper, dt)
            # Internal forces/torques must be recomputed before each dynamic update.
            System.update_internal_forces_and_torques(time)
            dyn_step(TimeStepper, System, time, dt)

        # Peel the last kinematic step and prefactor alone
        last_kin_prefactor = _steps_and_prefactors[-1][0]
        last_kin_step = _steps_and_prefactors[-1][1]

        last_kin_step(TimeStepper, System, time, dt)
        return time + last_kin_prefactor(TimeStepper, dt)
class _SystemCollectionStepper:
    """
    Symplectic stepper collection class
    """

    @staticmethod
    def do_step(
        TimeStepper,
        _steps_and_prefactors,
        SystemCollection,
        time: np.float64,
        dt: np.float64,
    ):
        """
        Function for doing symplectic stepper over the user defined rods (system).

        Parameters
        ----------
        SystemCollection: rod object
        time: float
        dt: float

        Returns
        -------
        The updated simulation time (float).
        """
        for (kin_prefactor, kin_step, dyn_step) in _steps_and_prefactors[:-1]:

            # Kinematic half-update for every memory block, then advance time.
            for system in SystemCollection._memory_blocks:
                kin_step(TimeStepper, system, time, dt)

            time += kin_prefactor(TimeStepper, dt)

            # Constrain only values
            SystemCollection.constrain_values(time)

            # We need internal forces and torques because they are used by interaction module.
            for system in SystemCollection._memory_blocks:
                system.update_internal_forces_and_torques(time)
                # system.update_internal_forces_and_torques()

            # Add external forces, controls etc.
            SystemCollection.synchronize(time)

            # Dynamic (rate) update for every memory block.
            for system in SystemCollection._memory_blocks:
                dyn_step(TimeStepper, system, time, dt)

            # Constrain only rates
            SystemCollection.constrain_rates(time)

        # Peel the last kinematic step and prefactor alone
        last_kin_prefactor = _steps_and_prefactors[-1][0]
        last_kin_step = _steps_and_prefactors[-1][1]

        for system in SystemCollection._memory_blocks:
            last_kin_step(TimeStepper, system, time, dt)
        time += last_kin_prefactor(TimeStepper, dt)
        SystemCollection.constrain_values(time)

        # Call back function, will call the user defined call back functions and store data
        SystemCollection.apply_callbacks(time, round(time / dt))

        # Zero out the external forces and torques
        for system in SystemCollection._memory_blocks:
            system.reset_external_forces_and_torques(time)

        return time
class SymplecticStepperMethods:
    """Introspects a symplectic stepper instance and assembles its step schedule.

    Collects the ``*step`` and ``*prefactor`` methods defined on the stepper
    class, mirrors them for time symmetry, and zips them into
    (prefactor, kinematic step, dynamic step) triples consumed by the
    ``do_step`` drivers above.
    """

    def __init__(self, timestepper_instance):
        take_methods_from = timestepper_instance
        # Let the total number of steps for the Symplectic method
        # be (2*n + 1) (for time-symmetry). What we do is collect
        # the first n + 1 entries down in _steps and _prefac below, and then
        # reverse and append it to itself.
        # NOTE: relies on the stepper's method names ending in "step" /
        # "prefactor", and on class-dict ordering (definition order).
        self._steps = [
            v
            for (k, v) in take_methods_from.__class__.__dict__.items()
            if k.endswith("step")
        ]
        # Prefac here is necessary because the linear-exponential integrator
        # needs only the prefactor and not the dt.
        self._prefactors = [
            v
            for (k, v) in take_methods_from.__class__.__dict__.items()
            if k.endswith("prefactor")
        ]

        # # We are getting function named as _update_internal_forces_torques from dictionary,
        # # it turns a list.
        # self._update_internal_forces_torques = [
        #     v
        #     for (k, v) in take_methods_from.__class__.__dict__.items()
        #     if k.endswith("forces_torques")
        # ]

        def mirror(in_list):
            """Mirrors an input list ignoring the last element
            If steps = [A, B, C]
            then this call makes it [A, B, C, B, A]

            Parameters
            ----------
            in_list : input list to be mirrored, modified in-place

            Returns
            -------

            """
            # syntax is very ugly
            if len(in_list) > 1:
                in_list.extend(in_list[-2::-1])
            elif in_list:
                in_list.append(in_list[0])

        mirror(self._steps)
        mirror(self._prefactors)

        assert (
            len(self._steps) == 2 * len(self._prefactors) - 1
        ), "Size mismatch in the number of steps and prefactors provided for a Symplectic Stepper!"

        # Even-indexed steps are kinematic, odd-indexed are dynamic.
        self._kinematic_steps = self._steps[::2]
        self._dynamic_steps = self._steps[1::2]

        # Avoid this check for MockClasses
        if len(self._kinematic_steps) > 0:
            assert (
                len(self._kinematic_steps) == len(self._dynamic_steps) + 1
            ), "Size mismatch in the number of kinematic and dynamic steps provided for a Symplectic Stepper!"

        from itertools import zip_longest

        def NoOp(*args):
            pass

        # The final triple gets NoOp in its dynamic slot (one more kinematic
        # step than dynamic steps); do_step peels it off accordingly.
        self._steps_and_prefactors = tuple(
            zip_longest(
                self._prefactors,
                self._kinematic_steps,
                self._dynamic_steps,
                fillvalue=NoOp,
            )
        )

    def step_methods(self):
        """Return the assembled (prefactor, kinematic, dynamic) schedule."""
        return self._steps_and_prefactors

    @property
    def n_stages(self):
        """Number of stages (triples) in the schedule."""
        return len(self._steps_and_prefactors)
class SymplecticStepperTag:
    """Marker type identifying a time stepper as symplectic."""

    def __init__(self):
        pass
class PositionVerlet:
    """
    Position Verlet symplectic time stepper class, which
    includes methods for second-order position Verlet.
    """

    Tag = SymplecticStepperTag()

    def __init__(self):
        pass

    def _first_prefactor(self, dt):
        """Half-step time advance used on both sides of the dynamic update."""
        return 0.5 * dt

    def _first_kinematic_step(self, System, time: np.float64, dt: np.float64):
        """Advance positions/directors by half a step using current rates."""
        prefac = self._first_prefactor(dt)

        overload_operator_kinematic_numba(
            System.n_nodes,
            prefac,
            System.kinematic_states.position_collection,
            System.kinematic_states.director_collection,
            System.velocity_collection,
            System.omega_collection,
        )

    def _first_dynamic_step(self, System, time: np.float64, dt: np.float64):
        """Advance velocities/angular velocities by a full step."""
        overload_operator_dynamic_numba(
            System.dynamic_states.rate_collection,
            System.dynamic_rates(time, dt),
        )
class PEFRL:
    """
    Position Extended Forest-Ruth Like Algorithm of
    <NAME>, <NAME> and <NAME>, Computer Physics Communications 146, 188 (2002),
    http://arxiv.org/abs/cond-mat/0110585
    """

    # xi and chi are confusing, but be careful!
    # These are the published PEFRL coefficients from the paper above.
    ξ = np.float64(0.1786178958448091e0)  # ξ
    λ = -np.float64(0.2123418310626054e0)  # λ
    χ = -np.float64(0.6626458266981849e-1)  # χ

    # Pre-calculate other coefficients
    lambda_dash_coeff = 0.5 * (1.0 - 2.0 * λ)
    xi_chi_dash_coeff = 1.0 - 2.0 * (ξ + χ)

    Tag = SymplecticStepperTag()

    def __init__(self):
        pass

    def _first_kinematic_prefactor(self, dt):
        """Time advance for the first kinematic sub-step (ξ·dt)."""
        return self.ξ * dt

    def _first_kinematic_step(self, System, time: np.float64, dt: np.float64):
        """First position/director update, by ξ·dt."""
        prefac = self._first_kinematic_prefactor(dt)

        overload_operator_kinematic_numba(
            System.n_nodes,
            prefac,
            System.kinematic_states.position_collection,
            System.kinematic_states.director_collection,
            System.velocity_collection,
            System.omega_collection,
        )

        # System.kinematic_states += prefac * System.kinematic_rates(time, prefac)

    def _first_dynamic_step(self, System, time: np.float64, dt: np.float64):
        """First rate update, by (1 - 2λ)/2 · dt."""
        prefac = self.lambda_dash_coeff * dt
        overload_operator_dynamic_numba(
            System.dynamic_states.rate_collection,
            System.dynamic_rates(time, prefac),
        )

        # System.dynamic_states += prefac * System.dynamic_rates(time, prefac)

    def _second_kinematic_prefactor(self, dt):
        """Time advance for the second kinematic sub-step (χ·dt)."""
        return self.χ * dt

    def _second_kinematic_step(self, System, time: np.float64, dt: np.float64):
        """Second position/director update, by χ·dt."""
        prefac = self._second_kinematic_prefactor(dt)

        overload_operator_kinematic_numba(
            System.n_nodes,
            prefac,
            System.kinematic_states.position_collection,
            System.kinematic_states.director_collection,
            System.velocity_collection,
            System.omega_collection,
        )
        # System.kinematic_states += prefac * System.kinematic_rates(time, prefac)

    def _second_dynamic_step(self, System, time: np.float64, dt: np.float64):
        """Second (middle) rate update, by λ·dt."""
        prefac = self.λ * dt
        overload_operator_dynamic_numba(
            System.dynamic_states.rate_collection,
            System.dynamic_rates(time, prefac),
        )

        # System.dynamic_states += prefac * System.dynamic_rates(time, prefac)

    def _third_kinematic_prefactor(self, dt):
        """Time advance for the middle kinematic sub-step ((1 - 2(ξ+χ))·dt)."""
        return self.xi_chi_dash_coeff * dt

    def _third_kinematic_step(self, System, time: np.float64, dt: np.float64):
        """Middle position/director update; mirrored sub-steps follow."""
        prefac = self._third_kinematic_prefactor(dt)
        # Need to fill in
        overload_operator_kinematic_numba(
            System.n_nodes,
            prefac,
            System.kinematic_states.position_collection,
            System.kinematic_states.director_collection,
            System.velocity_collection,
            System.omega_collection,
        )
        # System.kinematic_states += prefac * System.kinematic_rates(time, prefac)
| 2.359375 | 2 |
patch_extractor.py | StephenKyiMa/project_implementation | 0 | 12759487 | <gh_stars>0
class PatchExtractor:
    """Slide a square window over an image and extract the resulting patches.

    ``img`` is expected to expose ``.width``, ``.height`` and ``.crop(box)``
    (PIL-style interface) — TODO confirm against callers.
    """

    def __init__(self, img, patch_size, stride):
        self.img = img          # source image
        self.size = patch_size  # side length of each square patch (pixels)
        self.stride = stride    # step between consecutive patch origins

    def shape(self):
        """Return the patch-grid dimensions as (columns, rows)."""
        cols = int((self.img.width - self.size) / self.stride + 1)
        rows = int((self.img.height - self.size) / self.stride + 1)
        return cols, rows

    def extract_patch(self, patch):
        """Crop and return the patch at grid coordinate ``patch = (col, row)``."""
        left = patch[0] * self.stride
        upper = patch[1] * self.stride
        return self.img.crop((left, upper, left + self.size, upper + self.size))

    def extract_patches(self):
        """Return all patches, scanning rows top-to-bottom, left-to-right."""
        cols, rows = self.shape()
        patches = []
        for row in range(rows):
            for col in range(cols):
                patches.append(self.extract_patch((col, row)))
        return patches
| 2.65625 | 3 |
airbyte-integrations/connectors/source-youtube-analytics/source_youtube_analytics/source.py | OTRI-Unipd/OTRI-airbyte | 2 | 12759488 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import csv
import datetime
import io
import json
import pkgutil
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
import requests
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from airbyte_cdk.sources.streams.http import HttpStream, HttpSubStream
from airbyte_cdk.sources.streams.http.requests_native_auth import Oauth2Authenticator
from airbyte_cdk.sources.utils.transform import TransformConfig, TypeTransformer
class JobsResource(HttpStream):
    """
    https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs

    All YouTube Analytics streams require a created reporting job.
    This class allows to `list` all existing reporting jobs or `create` new reporting job for a specific stream. One stream can have only one reporting job.
    By creating a reporting job, you are instructing YouTube to generate stream data on a daily basis. If reporting job is removed YouTube removes all stream data.

    On every connector invocation, it gets a list of all running reporting jobs, if the currently processed stream has a reporting job - connector does nothing,
    but if the currently processed stream does not have a job connector immediately creates one. This connector does not store IDs of reporting jobs.
    If the reporting job was created by the user separately, this connector just uses that job. This connector does not remove reporting jobs it can only create them.
    After reporting job is created, the first data can be available only after up to 48 hours.

    NOTE: ``name`` and ``http_method`` are mutated by ``list()``/``create()``
    before each request; instances are therefore not safe to share across
    concurrent calls.
    """

    # Set per-call by list()/create() below.
    name = None
    primary_key = None
    http_method = None
    url_base = "https://youtubereporting.googleapis.com/v1/"
    JOB_NAME = "Airbyte reporting job"

    def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
        # Jobs listing is not paginated by this connector.
        return None

    def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        # Yield the whole JSON body as a single record.
        return [response.json()]

    def path(self, **kwargs) -> str:
        return "jobs"

    def request_body_json(self, **kwargs) -> Optional[Mapping]:
        # A body is only sent for create() (when self.name is set).
        if self.name:
            return {"name": self.JOB_NAME, "reportTypeId": self.name}

    def list(self):
        "https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/list"
        self.name = None
        self.http_method = "GET"
        results = list(self.read_records(sync_mode=None))
        result = results[0]
        return result.get("jobs", {})

    def create(self, name):
        "https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/create"
        self.name = name
        self.http_method = "POST"
        results = list(self.read_records(sync_mode=None))
        result = results[0]
        # Returns the id of the newly created reporting job.
        return result["id"]
class ReportResources(HttpStream):
"https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs.reports/list"
name = None
primary_key = "id"
url_base = "https://youtubereporting.googleapis.com/v1/"
def __init__(self, name: str, jobs_resource: JobsResource, job_id: str, **kwargs):
self.name = name
self.jobs_resource = jobs_resource
self.job_id = job_id
super().__init__(**kwargs)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
response_json = response.json()
reports = []
for report in response_json.get("reports", []):
report = {**report}
report["startTime"] = datetime.datetime.strptime(report["startTime"], "%Y-%m-%dT%H:%M:%S%z")
reports.append(report)
reports.sort(key=lambda x: x["startTime"])
date = kwargs["stream_state"].get("date")
if date:
reports = [r for r in reports if int(r["startTime"].date().strftime("%Y%m%d")) >= date]
if not reports:
reports.append(None)
return reports
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
if not self.job_id:
self.job_id = self.jobs_resource.create(self.name)
self.logger.info(f"YouTube reporting job is created: '{self.job_id}'")
return "jobs/{}/reports".format(self.job_id)
class ChannelReports(HttpSubStream):
"https://developers.google.com/youtube/reporting/v1/reports/channel_reports"
name = None
primary_key = None
cursor_field = "date"
url_base = ""
transformer = TypeTransformer(TransformConfig.DefaultSchemaNormalization)
def __init__(self, name: str, dimensions: List[str], **kwargs):
self.name = name
self.primary_key = dimensions
super().__init__(**kwargs)
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
fp = io.StringIO(response.text)
reader = csv.DictReader(fp)
for record in reader:
yield record
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
if not current_stream_state:
return {self.cursor_field: latest_record[self.cursor_field]}
return {self.cursor_field: max(current_stream_state[self.cursor_field], latest_record[self.cursor_field])}
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return stream_slice["parent"]["downloadUrl"]
def read_records(self, *, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
parent = stream_slice.get("parent")
if parent:
yield from super().read_records(stream_slice=stream_slice, **kwargs)
else:
self.logger.info("no data from parent stream")
yield from []
class SourceYoutubeAnalytics(AbstractSource):
    """Airbyte source for the YouTube Analytics bulk-reporting API."""

    @staticmethod
    def get_authenticator(config):
        """Build an OAuth2 authenticator from the connector credentials."""
        creds = config["credentials"]
        return Oauth2Authenticator(
            token_refresh_endpoint="https://oauth2.googleapis.com/token",
            client_id=creds["client_id"],
            client_secret=creds["client_secret"],
            refresh_token=creds["refresh_token"],
        )

    def check_connection(self, logger, config) -> Tuple[bool, any]:
        """Verify credentials by listing the account's reporting jobs."""
        jobs_resource = JobsResource(authenticator=self.get_authenticator(config))
        try:
            jobs_resource.list()
        except Exception as e:
            return False, str(e)
        return True, None

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Build one ChannelReports stream per bundled channel-report spec."""
        authenticator = self.get_authenticator(config)
        jobs_resource = JobsResource(authenticator=authenticator)

        # Existing reporting jobs, keyed by report type id.
        existing_jobs = {job["reportTypeId"]: job["id"] for job in jobs_resource.list()}

        report_specs = json.loads(pkgutil.get_data("source_youtube_analytics", "defaults/channel_reports.json"))

        streams = []
        for spec in report_specs:
            stream_name = spec["id"]
            parent = ReportResources(
                name=stream_name,
                jobs_resource=jobs_resource,
                job_id=existing_jobs.get(stream_name),
                authenticator=authenticator,
            )
            streams.append(
                ChannelReports(
                    name=stream_name,
                    dimensions=spec["dimensions"],
                    parent=parent,
                    authenticator=authenticator,
                )
            )
        return streams
| 2.484375 | 2 |
nn/clipping.py | awesome-archive/sentence-space | 211 | 12759489 | <gh_stars>100-1000
import theano.tensor as T
class MaxNorm(object):
    """Rescale gradients so their global L2 norm never exceeds max_norm."""

    def __init__(self, max_norm=5):
        self.max_norm = max_norm

    def __call__(self, grads):
        # Global norm across every gradient tensor in the list.
        total_norm = T.sqrt(sum(T.sum(g ** 2) for g in grads))
        return [self.clip_norm(g, self.max_norm, total_norm) for g in grads]

    def clip_norm(self, g, c, n):
        """Scale g by c/n when the global norm n reaches the threshold c (>0)."""
        if c > 0:
            g = T.switch(T.ge(n, c), g * c / n, g)
        return g
class Clip(object):
    """Element-wise clip every gradient into [-clip, clip]."""

    def __init__(self, clip=5):
        self.clip = clip

    def __call__(self, grads):
        bound = self.clip
        return [T.clip(g, -bound, bound) for g in grads]
| 2.265625 | 2 |
geotrek/core/tests/test_views.py | fossabot/Geotrek-admin | 0 | 12759490 | # -*- coding: utf-8 -*-
import json
import re
import mock
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from geotrek.authent.tests import AuthentFixturesTest
from geotrek.common.tests import CommonTest
from geotrek.common.utils import LTE
from geotrek.authent.factories import PathManagerFactory, StructureFactory
from geotrek.authent.models import default_structure
from geotrek.core.factories import (PathFactory, StakeFactory, TrailFactory, ComfortFactory)
from geotrek.core.models import Path, Trail
class PathViewsTest(CommonTest):
    """CRUD/view tests for Path, driven by the generic CommonTest suite."""
    model = Path
    modelfactory = PathFactory
    userfactory = PathManagerFactory

    def login(self):
        # Authenticate the test client as a user with path-management rights.
        user = PathManagerFactory(password='<PASSWORD>')
        success = self.client.login(username=user.username, password='<PASSWORD>')
        self.assertTrue(success)

    def get_bad_data(self):
        # A linestring far from any existing path: snapping must fail.
        return {'geom': '{"geom": "LINESTRING (0.0 0.0, 1.0 1.0)"}'}, _("Linestring invalid snapping.")

    def get_good_data(self):
        # Minimal valid form payload for creating a Path.
        return {
            'name': '',
            'structure': default_structure().pk,
            'stake': '',
            'comfort': ComfortFactory.create().pk,
            'trail': '',
            'comments': '',
            'departure': '',
            'arrival': '',
            'source': '',
            'valid': 'on',
            'geom': '{"geom": "LINESTRING (99.0 89.0, 100.0 88.0)", "snap": [null, null]}',
        }

    def _post_add_form(self):
        # Avoid overlap, delete all !
        for p in Path.objects.all():
            p.delete()
        super(PathViewsTest, self)._post_add_form()

    def test_structurerelated_filter(self):
        def test_structure(structure, stake):
            # Log in as a manager attached to `structure` and check that the
            # add form only offers stakes belonging to that same structure.
            user = self.userfactory(password='<PASSWORD>')
            p = user.profile
            p.structure = structure
            p.save()
            success = self.client.login(username=user.username, password='<PASSWORD>')
            self.assertTrue(success)
            response = self.client.get(Path.get_add_url())
            self.assertEqual(response.status_code, 200)
            self.assertTrue('form' in response.context)
            form = response.context['form']
            self.assertTrue('stake' in form.fields)
            stakefield = form.fields['stake']
            self.assertTrue((stake.pk, unicode(stake)) in stakefield.choices)
            self.client.logout()
        # Test for two structures
        s1 = StructureFactory.create()
        s2 = StructureFactory.create()
        st1 = StakeFactory.create(structure=s1)
        StakeFactory.create(structure=s1)
        st2 = StakeFactory.create(structure=s2)
        StakeFactory.create(structure=s2)
        test_structure(s1, st1)
        test_structure(s2, st2)

    def test_basic_format(self):
        # Non-ASCII path names must survive the export formats; note the
        # super() call deliberately skips CommonTest's own override.
        self.modelfactory.create()
        self.modelfactory.create(name=u"ãéè")
        super(CommonTest, self).test_basic_format()

    def test_path_form_is_not_valid_if_no_geometry_provided(self):
        self.login()
        data = self.get_good_data()
        data['geom'] = ''
        response = self.client.post(Path.get_add_url(), data)
        # 200 = form redisplayed with errors (a success would redirect).
        self.assertEqual(response.status_code, 200)

    def test_manager_can_delete(self):
        self.login()
        path = PathFactory()
        response = self.client.get(path.get_detail_url())
        self.assertEqual(response.status_code, 200)
        response = self.client.post(path.get_delete_url())
        self.assertEqual(response.status_code, 302)

    def test_elevation_area_json(self):
        # The DEM (elevation) endpoint must answer JSON for an existing path.
        self.login()
        path = self.modelfactory.create()
        url = '/api/en/paths/{pk}/dem.json'.format(pk=path.pk)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-Type'], 'application/json')
class DenormalizedTrailTest(AuthentFixturesTest):
    """Checks the denormalized trail names shown in the path list view."""

    def setUp(self):
        # Two trails attached to the same single path.
        self.trail1 = TrailFactory(no_path=True)
        self.trail2 = TrailFactory(no_path=True)
        self.path = PathFactory()
        self.trail1.add_path(self.path)
        self.trail2.add_path(self.path)

    def test_path_and_trails_are_linked(self):
        self.assertIn(self.trail1, self.path.trails.all())
        self.assertIn(self.trail2, self.path.trails.all())

    def login(self):
        user = PathManagerFactory(password='<PASSWORD>')
        success = self.client.login(username=user.username, password='<PASSWORD>')
        self.assertTrue(success)

    def test_denormalized_path_trails(self):
        PathFactory.create_batch(size=50)
        TrailFactory.create_batch(size=50)
        self.login()
        # Denormalization must keep the list view from issuing one query per
        # row (LTE = "less than or equal" query-count assertion helper).
        with self.assertNumQueries(LTE(15)):
            self.client.get(reverse('core:path_json_list'))

    def test_trails_are_shown_as_links_in_list(self):
        self.login()
        response = self.client.get(reverse('core:path_json_list'))
        self.assertEqual(response.status_code, 200)
        paths_json = json.loads(response.content)
        # Column 6 of the datatable row carries the trail links; the order of
        # the two trails is not guaranteed, so accept either.
        trails_column = paths_json['aaData'][0][6]
        self.assertTrue(trails_column == u'%s, %s' % (self.trail1.name_display, self.trail2.name_display) or
                        trails_column == u'%s, %s' % (self.trail2.name_display, self.trail1.name_display))
class TrailViewsTest(CommonTest):
    """CRUD/view tests for Trail."""
    model = Trail
    modelfactory = TrailFactory
    userfactory = PathManagerFactory

    def get_good_data(self):
        # Minimal valid form payload: a trail drawn on one freshly created path.
        path = PathFactory.create()
        return {
            'name': 't',
            'departure': 'Below',
            'arrival': 'Above',
            'comments': 'No comment',
            'structure': default_structure().pk,
            'topology': '{"paths": [%s]}' % path.pk,
        }

    def test_detail_page(self):
        self.login()
        trail = TrailFactory()
        response = self.client.get(trail.get_detail_url())
        self.assertEqual(response.status_code, 200)

    @mock.patch('mapentity.models.MapEntityMixin.get_attributes_html')
    def test_document_export(self, get_attributes_html):
        # Stub attribute rendering and provide a fake map capture so the
        # document export does not depend on a screenshot server.
        get_attributes_html.return_value = '<p>mock</p>'
        trail = TrailFactory()
        self.login()
        with open(trail.get_map_image_path(), 'w') as f:
            f.write('***' * 1000)
        response = self.client.get(trail.get_document_url())
        self.assertEqual(response.status_code, 200)

    def test_add_trail_from_existing_topology_does_not_use_pk(self):
        import bs4
        self.login()
        trail = TrailFactory(offset=3.14)
        response = self.client.get(Trail.get_add_url() + '?topology=%s' % trail.pk)
        soup = bs4.BeautifulSoup(response.content)
        textarea_field = soup.find(id="id_topology")
        # The prefilled topology must be serialized by value, not by pk,
        # otherwise saving the form would mutate the source trail's topology.
        self.assertIn('"kind": "TOPOLOGY"', textarea_field.text)
        self.assertIn('"offset": 3.14', textarea_field.text)
        self.assertNotIn('"pk": %s' % trail.pk, textarea_field.text)

    def test_add_trail_from_existing_topology(self):
        self.login()
        trail = TrailFactory()
        form_data = self.get_good_data()
        form_data['topology'] = trail.serialize(with_pk=False)
        response = self.client.post(Trail.get_add_url(), form_data)
        self.assertEqual(response.status_code, 302)  # success, redirects to detail view
        # Extract the new object's pk from the redirect URL.
        p = re.compile(r"http://testserver/trail/(\d+)/")
        m = p.match(response['Location'])
        new_pk = int(m.group(1))
        new_trail = Trail.objects.get(pk=new_pk)
        self.assertIn(trail, new_trail.trails.all())
| 2.046875 | 2 |
onegram/exceptions.py | pauloromeira/onegram | 150 | 12759491 | <filename>onegram/exceptions.py
class OnegramException(Exception):
    """Base class for every error raised by onegram."""
# TODO [romeira]: Login exceptions {06/03/18 23:07}
class AuthException(OnegramException):
    """Base class for authentication-related errors."""
class AuthFailed(AuthException):
    """Raised when an authentication attempt fails."""
class AuthUserError(AuthException):
    """Raised for user-related authentication problems."""
class NotSupportedError(OnegramException):
    """Raised when a requested operation is not supported."""
class RequestFailed(OnegramException):
    """Raised when an HTTP request does not succeed."""
class RateLimitedError(RequestFailed):
    """Raised when a request fails because of rate limiting."""
# TODO [romeira]: Query/action exceptions {06/03/18 23:08}
# TODO [romeira]: Session expired exception {06/03/18 23:08}
# TODO [romeira]: Private user exception/warning {06/03/18 23:09}
# TODO [romeira]: Not found exception {06/03/18 23:12}
# TODO [romeira]: Already following/liked/commented? warnings {06/03/18 23:12}
# TODO [romeira]: Timeout exception {06/03/18 23:12}
| 2.25 | 2 |
scripts/generate_youtube_description.py | bahaisongproject/bahai-songs | 5 | 12759492 | <reponame>bahaisongproject/bahai-songs<filename>scripts/generate_youtube_description.py
import os
import subprocess
import requests
import sys
import json
import argparse
from utils import get_music, format_songsheet, format_excerpts, get_translation
# GraphQL endpoint of the bahai song project API.
BSP_API_URL = "https://bsp-graphql-server.herokuapp.com"
# NOTE(review): a second assignment used to override this with
# "http://localhost:4000" (leftover local-dev debugging). Removed so the
# script talks to the production endpoint by default.
# Directory containing the ChordPro song sheets (<slug>.pro).
CHORDPRO_DIR = "src"
# GraphQL query template. Double braces escape literal braces for str.format,
# which substitutes only {slug}.
QUERY = """query {{
song(where: {{
slug: "{slug}"
}} ) {{
title
song_description
slug
excerpts {{
excerpt_text
language {{
language_name_en
}}
source {{
source_author
source_description
excerpts {{
excerpt_text
source {{
source_author
source_description
}}
language {{
language_name_en
}}
}}
}}
}}
sources {{
source_author
}}
contributors {{
contributor_name
}}
languages {{
language_name_en
}}
}}
}}"""
# YouTube description template; placeholders are filled by main().
YT_DESCRIPTION="""\
Download a song sheet with lyrics and chords
{song_url}
▬ Based on ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
{based_on}
▬ Translation ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
{translation}
▬ Lyrics & Chords ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
{song_sheet}
▬ Music ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
{music}
▬ Language ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
{language}
▬ About bahá'í song project ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
bahá’í song project was launched in 2011 by a group of friends who wanted to encourage others to sing and play Bahá’í songs in their communities. Over the years it has become a resource for people from all around the world who share the understanding that singing prayers and sacred verses can bring much joy and vibrancy to a community, and resources for learning to sing and play songs should be easily accessible.
▬ Links ▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬
► Facebook: https://www.facebook.com/bahaisongproject
► Instagram: https://www.instagram.com/bahaisongproject
► Twitter: https://twitter.com/bahaisongp
► PayPal: https://www.paypal.com/paypalme/bahaisongproject
► Website: https://www.bahaisongproject.com
"""
def main(args):
    """Build and print a YouTube video description for the song `args.slug`.

    Fetches song metadata from the bsp GraphQL API, renders the chord sheet
    via the `chordpro` CLI and fills the YT_DESCRIPTION template.
    Exits with an error message when the slug is unknown.
    """
    song_query = QUERY.format(slug=args.slug)
    r = requests.post(BSP_API_URL, json={'query': song_query})
    song_data = json.loads(r.text)['data']['song']
    if song_data is None:
        sys.exit('No song with slug: {slug}'.format(slug=args.slug))
    # Default every template field up front so str.format below can never
    # raise KeyError (the original crashed for songs without excerpts or
    # without translations, which left 'based_on'/'translation' unset).
    yt_description_data = {
        'song_url': 'https://www.bahaisongproject.com/{slug}'.format(slug=args.slug),
        'based_on': '',
        'translation': '',
        'song_sheet': '',
        'music': '',
        'language': '',
    }
    excerpts = song_data.get('excerpts')
    if excerpts:
        # "Based on": all excerpts the song is built from.
        yt_description_data['based_on'] = '\n\n'.join(format_excerpts(excerpts))
        # "Translation": English renderings of any non-English excerpt.
        translations = [t for t in (get_translation(e) for e in excerpts
                                    if e['language']['language_name_en'] != 'English') if t]
        if translations:
            yt_description_data['translation'] = '\n\n'.join(format_excerpts(translations))
    # Lyrics & chords rendered as plain text by the chordpro CLI.
    out, err = subprocess.Popen(
        ['chordpro', '{chordpro_dir}/{slug}.pro'.format(chordpro_dir=CHORDPRO_DIR, slug=args.slug), '--generate=Text'],
        stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE).communicate()
    yt_description_data['song_sheet'] = format_songsheet(out.decode('utf-8'))
    music = get_music(song_data)
    if not music:
        music = "Do you know who composed this song? Please let us know!\n💌 https://bsp.app/contact"
    yt_description_data['music'] = music
    languages = [language['language_name_en'] for language in song_data['languages']]
    yt_description_data['language'] = ', '.join(languages)
    print(YT_DESCRIPTION.format(**yt_description_data))
if __name__ == "__main__":
    # CLI entry point: the song slug is the only (required) argument.
    arg_parser = argparse.ArgumentParser(description='Generate YouTube description for bsp videos')
    arg_parser.add_argument('--slug', metavar='S', type=str, required=True,
                            help='slug of song')
    main(arg_parser.parse_args())
| 1.96875 | 2 |
Algorithms on Graphs/week2/_8e0f723ed0cef023efb3387b662c8b98_09_graph_decomposition_starter_files_2/acyclicity/acyclicity.py | DuyTungHa/Algorithms-and-Data-Structures | 26 | 12759493 | <reponame>DuyTungHa/Algorithms-and-Data-Structures
#Uses python3
import sys
def dfs(adj: list) -> int:
    """Return 1 if the directed graph (adjacency lists) has a cycle, else 0."""
    seen = [0 for _ in adj]
    on_stack = [0 for _ in adj]
    for start in range(len(adj)):
        if seen[start]:
            continue
        if not explore(start, adj, seen, on_stack):
            return 1
    return 0
def explore(v: int, adj: list, visited: list, record: list):
    """DFS from v; returns False as soon as a back edge (cycle) is found.

    `record` marks vertices on the current recursion stack; `visited`
    marks everything ever reached. Both lists are mutated in place.
    """
    visited[v] = 1
    record[v] = 1
    for w in adj[v]:
        if record[w] == 1:
            # w is on the current stack: back edge -> cycle.
            return False
        if visited[w] != 1 and not explore(w, adj, visited, record):
            return False
    record[v] = 0  # done with v's subtree
    return True
if __name__ == '__main__':
    # Read the whole graph from stdin: "n m" followed by m edges "a b".
    raw = sys.stdin.read()  # fix: do not shadow the builtin `input`
    data = list(map(int, raw.split()))
    n, m = data[0:2]
    data = data[2:]
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    # Build the adjacency list (input is 1-based, storage 0-based).
    adj = [[] for _ in range(n)]
    for (a, b) in edges:
        adj[a - 1].append(b - 1)
    # Print 1 if the directed graph contains a cycle, 0 otherwise.
    print(dfs(adj))
| 3.671875 | 4 |
AnyTimeGridSearchCV/grids/views.py | OryJonay/anytime-gridsearch | 11 | 12759494 | import json
import coreapi
import coreschema
from django.db.utils import IntegrityError
from django.shortcuts import get_object_or_404
from django.utils.datastructures import MultiValueDictKeyError
from numpydoc import docscrape
from rest_framework import status, schemas
from rest_framework.generics import ListAPIView, RetrieveAPIView, \
ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from AnyTimeGridSearchCV.grids.anytime_search import ESTIMATORS_DICT, \
_convert_clf_param, ATGridSearchCV
from AnyTimeGridSearchCV.grids.models import GridSearch, CVResult, DataSet, \
CVResultScore
from AnyTimeGridSearchCV.grids.serializers import GridSearchSerializer, \
CVResultSerializer, DatasetSerializer
class EstimatorsListView(APIView):
    """
    Returns a list of all available scikit-learn classifiers.
    """

    def get(self, request, *args, **kwargs):
        estimator_names = list(ESTIMATORS_DICT.keys())
        return Response(estimator_names, status=status.HTTP_200_OK)
class EstimatorDetailView(APIView):
    """
    Returns a detailed view of a scikit-learn classifier - all available arguments for the classifier.
    """
    schema = schemas.AutoSchema(manual_fields=[
        coreapi.Field(
            'clf',
            required=True,
            location='path',
            schema=coreschema.String(
                description='scikit-learn Estimator name'
            )
        ),
    ])

    def get(self, request, *args, **kwargs):
        # LBYL variant of the original try/except KeyError lookup.
        clf_name = kwargs.get('clf', 'Not a valid scikit-learn estimator name')
        if clf_name not in ESTIMATORS_DICT:
            # Unknown estimator: empty payload, still HTTP 200.
            return Response({'name': '', 'type': '', 'desc': ''},
                            status=status.HTTP_200_OK)
        clf = ESTIMATORS_DICT[clf_name]
        # Parse the estimator's numpydoc "Parameters" section.
        params = docscrape.ClassDoc(clf)['Parameters']
        payload = [{'name': arg_name, 'type': arg_type, 'desc': arg_desc}
                   for arg_name, arg_type, arg_desc in params]
        return Response(payload, status=status.HTTP_200_OK)
class GridsListView(ListCreateAPIView):
    """
    get:
    Returns a list of all available grid searches.
    post:
    Creates a new grid search.
    """
    queryset = GridSearch.objects.all()
    serializer_class = GridSearchSerializer

    def post(self, request, *args, **kwargs):
        # Delegates straight to DRF's default create behaviour.
        return super(GridsListView, self).post(request, *args, **kwargs)
class GridDetailView(RetrieveAPIView):
    """
    Returns the specified grid (uuid, dataset name and scikit-learn classifier name).
    """
    # Look up by the `uuid` URL kwarg instead of the default pk.
    lookup_field = 'uuid'
    queryset = GridSearch.objects.all()
    serializer_class = GridSearchSerializer
class GridResultsListSchema(schemas.AutoSchema):
    """AutoSchema that adds the cv_data form field to POST requests only."""

    def get_manual_fields(self, path, method):
        base = schemas.AutoSchema.get_manual_fields(self, path, method)
        if method == 'GET':
            return base
        if method == 'POST':
            extra = coreapi.Field('cv_data', required=True, location='form',
                                  schema=coreschema.Object(description='Cross validation result'))
            return base + [extra]
        # Other methods fall through and return None (original behaviour).
class GridResultsList(ListCreateAPIView):
    """
    get:
    Returns a list of all the results (CV classifications) for given grid.
    post:
    Creates a new result instance for specified grid.
    """
    queryset = CVResult.objects.all()
    serializer_class = CVResultSerializer
    schema = GridResultsListSchema(manual_fields=[
        coreapi.Field(
            'uuid',
            required=True,
            location='path',
            schema=coreschema.String(
                description='GridSearch UUID'
            )
        ),
    ])

    def get_queryset(self):
        # Restrict results to the grid search addressed by the URL's uuid (404 otherwise).
        _gs = get_object_or_404(GridSearch, uuid=self.kwargs['uuid'])
        return _gs.results.all()

    def post(self, request, *args, **kwargs):
        import numpy
        _gs = get_object_or_404(GridSearch, uuid=self.kwargs['uuid'])
        multimetric_scores = json.loads(request.data['cv_data'])
        # Scorer names are recovered from key suffixes: cv_data keys look like
        # 'train_<scorer>' / 'test_<scorer>' plus 'fit_time' / 'score_time'.
        scorers = set(map(lambda j: j.split('_')[-1],
                          filter(lambda i: i != 'fit_time' and i != 'score_time',
                                 multimetric_scores)))
        # One CVResult per (grid search, parameter combination).
        cv_result, _ = CVResult.objects.get_or_create(gridsearch=_gs,
                                                      params=json.loads(request.data['params']))
        cv_result.fit_time = multimetric_scores['fit_time']
        cv_result.score_time = multimetric_scores['score_time']
        cv_result.save()
        # One CVResultScore row per scorer; `score` is the mean test score
        # rounded to 6 decimals.
        CVResultScore.objects.bulk_create([CVResultScore(scorer=scorer, train_scores=multimetric_scores['train_%s' % scorer],
                                                         test_scores=multimetric_scores['test_%s' % scorer],
                                                         score=round(numpy.array(multimetric_scores[
                                                             'test_%s' % scorer]).mean(), 6),
                                                         cv_result=cv_result) for scorer in scorers])
        return Response(CVResultSerializer(cv_result).data, status=status.HTTP_201_CREATED)
class DataSetsList(ListCreateAPIView):
    """
    get:
    Returns a list of all the existing Datasets.
    post:
    Creates a new Dataset instance.
    """
    queryset = DataSet.objects.all()
    serializer_class = DatasetSerializer

    def post(self, request, *args, **kwargs):
        import numpy
        # Validate the dataset name (present and non-empty).
        try:
            name = request.data['name']
        except MultiValueDictKeyError:
            return Response('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)
        if not name:
            return Response('Missing dataset name', status=status.HTTP_400_BAD_REQUEST)
        # Both CSV uploads are required and must use these exact filenames.
        try:
            examples, labels = request.FILES['examples'], request.FILES['labels']
        except MultiValueDictKeyError:
            return Response('Missing dataset files', status=status.HTTP_400_BAD_REQUEST)
        if examples.name != 'examples.csv':
            return Response('Bad name of examples file', status=status.HTTP_400_BAD_REQUEST)
        if labels.name != 'labels.csv':
            return Response('Bad name of labels file', status=status.HTTP_400_BAD_REQUEST)
        # NOTE(review): genfromtxt consumes the upload streams; presumably the
        # storage backend rewinds/reopens them before DataSet saves the files
        # -- confirm, otherwise truncated files could be persisted.
        if len(numpy.genfromtxt(examples, delimiter=',')) != len(numpy.genfromtxt(labels, delimiter=',')):
            return Response('Examples and labels are not the same length', status=status.HTTP_400_BAD_REQUEST)
        try:
            return Response(DatasetSerializer(DataSet.objects.create(name=name,
                                                                     examples=examples,
                                                                     labels=labels)).data,
                            status=status.HTTP_201_CREATED)
        except IntegrityError:
            # Dataset name is unique at the database level.
            return Response('Name already exists', status=status.HTTP_400_BAD_REQUEST)
class DataSetGridsListView(ListAPIView):
    """
    Returns all grid searches on the given Dataset.
    """
    queryset = GridSearch.objects.all()
    serializer_class = GridSearchSerializer
    schema = schemas.AutoSchema(manual_fields=[
        coreapi.Field(
            'name',
            required=True,
            location='path',
            schema=coreschema.String(
                description='Dataset name'
            )
        ),
    ])

    def get_queryset(self):
        # 404 when the dataset name from the URL does not exist.
        dataset = get_object_or_404(DataSet, name=self.kwargs['name'])
        return dataset.grid_searches.all()
class ATGridSearchCreateView(APIView):
    """
    Creates a new ATGridSearch instance (with the grid specified in the request) and starts it.
    """
    schema = schemas.AutoSchema(manual_fields=[
        coreapi.Field(
            'dataset',
            required=True,
            location='form',
            schema=coreschema.String(description='Dataset name')
        ),
        coreapi.Field(
            'clf',
            required=True,
            location='form',
            schema=coreschema.String(description='scikit-learn estimator name')
        ),
        coreapi.Field(
            'args',
            required=True,
            location='form',
            schema=coreschema.Object(description='Grid to search'),
        ),
    ])

    def post(self, request, *args, **kwargs):
        # Resolve the dataset and estimator by name; 400 with a message otherwise.
        try:
            ds = DataSet.objects.get(name=request.data['dataset'])
        except DataSet.DoesNotExist:
            return Response('No DataSet named {}'.format(request.data['dataset']), status=status.HTTP_400_BAD_REQUEST)
        try:
            classifier = ESTIMATORS_DICT[request.data['clf']]
        except KeyError:
            return Response('No sklearn classifier named {}'.format(request.data['clf']), status=status.HTTP_400_BAD_REQUEST)
        # Grid values arrive as strings; convert each to its python value.
        clf_params = {k: _convert_clf_param(v) for k, v in request.data['args'].items()}
        gs = ATGridSearchCV(classifier(), clf_params, dataset=ds.pk)
        # Start the search; the returned uuid lets clients poll for results.
        gs.fit()
        return Response(gs._uuid, status=status.HTTP_201_CREATED)
| 2.09375 | 2 |
main.py | lukebakken/riak-python-benchmark | 0 | 12759495 | <reponame>lukebakken/riak-python-benchmark
#!/usr/bin/env python
import datetime
import logging
import random
import riak
import socket
import sys
import time
try:
    # Prefer cooperative (gevent) I/O when available: patch the blocking
    # stdlib primitives so many Riak connections can wait concurrently.
    from gevent import monkey
    monkey.patch_all()
    monkey.patch_socket(aggressive=True, dns=True)
    monkey.patch_select(aggressive=True)
    sys.stdout.write('using gevent.monkey\n')
except ImportError:
    # gevent is optional; fall back to plain blocking sockets.
    sys.stderr.write('NOT using gevent.monkey\n')
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s [%(levelname)s] '
                           '%(message)s')
def errexit(msg, *args):
    """Log an error (printf-style args) and abort the process with status 1."""
    logging.error(msg, *args)
    raise SystemExit(1)
# HOSTS - comma separated list of Riak host IPs/Names
# RECORD_COUNT - total number of rows to write to Riak TS
# WORKER_COUNT - number of Threads to use, each with own RiakClient
# BATCH_SIZE - number of rows to write in each Put request
# POOL_SIZE - multiput pool size
usage = 'usage: python main.py HOSTS RECORD_COUNT WORKER_COUNT BATCH_SIZE [POOL_SIZE]'
if len(sys.argv) >= 5:
    logging.debug('argv: %s', sys.argv)
    hosts = sys.argv[1].split(',')
    record_count = int(sys.argv[2])
    worker_count = int(sys.argv[3])
    batch_size = int(sys.argv[4])
    # Optional 5th argument overrides the default multiput pool size.
    pool_size = 128
    if len(sys.argv) == 6:
        pool_size = int(sys.argv[5])
else:
    # Not enough arguments: print usage and exit(1).
    errexit(usage)
hostname = socket.getfqdn()
# Build the RiakClient node list from "host:pb_port" entries.
nodes = []
for i, host in enumerate(hosts):
    node, pb_port = host.split(':')
    # tuple format is:
    # HOST HTTP PORT PB PORT
    node = (node, 8098, pb_port)
    nodes.append(node)
logging.debug('hostname: %s', hostname)
logging.debug('nodes: %s', nodes)
logging.debug('record_count: %s', record_count)
logging.debug('worker_count: %s', worker_count)
logging.debug('batch_size: %s', batch_size)
logging.debug('pool_size: %s', pool_size)
# Payload: every row carries 10 copies of one `ycsb_row_size`-char random
# string, shared by all rows.
# NOTE(review): ycsb_row_count appears unused -- generate_rows hardcodes 10.
ycsb_row_size = 100
ycsb_row_count = 10
randstr = ''
for i in range(ycsb_row_size):
    randstr += chr(random.randint(ord('a'), ord('z')))
def generate_rows(worker_id, start_timestamp, batch_sz):
    """Build batch_sz TS rows: [hostname, worker_id, timestamp, 10 x payload]."""
    rows = []
    ts = start_timestamp
    for _ in xrange(batch_sz):
        row = [hostname, worker_id, ts]
        row.extend([randstr] * 10)
        rows.append(row)
        ts += 1  # consecutive rows are one millisecond apart
    return rows
client = riak.RiakClient(
    protocol='pbc',
    nodes=nodes,
    multiget_pool_size=pool_size,
    multiput_pool_size=pool_size)
records_written = 0
ops_count = 0
start_time = time.time()
# Millisecond timestamp seeds the TS rows' time column.
start_ms = riak.util.unix_time_millis(datetime.datetime.utcnow())
table_name = 'tsycsb'
table = client.table(table_name)
while records_written < record_count:
    # Build one batch per worker, then submit them all with a single multiput.
    ts_objs = []
    for i in xrange(worker_count):
        wid = 'worker-{}'.format(i)
        rows = generate_rows(wid, start_ms, batch_size)
        ts_obj = table.new(rows)
        ts_objs.append(ts_obj)
    results = client.multiput(ts_objs)
    # TODO check results
    # if result != True:
    #     logger.error('got non-True result when storing batch')
    # TODO: orly?
    # https://github.com/BrianMMcClain/riak-java-benchmark/blob/master/src/main/java/com/basho/riak/BenchmarkWorker.java#L78
    batch_count = batch_size * worker_count
    records_written += batch_count # TODO Java increments by 1
    ops_count += len(ts_objs)
    # Advance timestamps so the next batch does not overwrite this one.
    start_ms += batch_count
    if records_written % 1000 == 0:
        logging.info('records_written: %d', records_written)
client.close()
end_time = time.time()
elapsed_secs = end_time - start_time
logging.info('wrote %d records in %d seconds', records_written, elapsed_secs)
logging.info('throughput: %d recs/sec', record_count // elapsed_secs)
logging.info('throughput: %d ops/sec', ops_count // elapsed_secs)
| 2.1875 | 2 |
tpcdsDataGenerator/dataParser/web_returns.py | bomeng/smartbench | 0 | 12759496 | <filename>tpcdsDataGenerator/dataParser/web_returns.py
from pyspark.sql import Row
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext
import os
conf = SparkConf()
sc = SparkContext(conf=conf)
# HiveContext so the parsed table can be saved into the Hive metastore.
spark = HiveContext(sc)
# Raw pipe-delimited TPC-DS web_returns dump, located under $DATA_HDFS.
textDataRDD = sc.textFile(os.environ["DATA_HDFS"] + "web_returns.dat")
# Split each '|'-delimited line and map the fields positionally onto the 24
# web_returns columns.
textDataDF = textDataRDD.map(lambda x: x.split("|")).map(lambda x: Row(wr_returned_date_sk = x[0],wr_returned_time_sk = x[1],wr_item_sk = x[2],wr_refunded_customer_sk = x[3],wr_refunded_cdemo_sk = x[4],wr_refunded_hdemo_sk = x[5],wr_refunded_addr_sk = x[6],wr_returning_customer_sk = x[7],wr_returning_cdemo_sk = x[8],wr_returning_hdemo_sk = x[9],wr_returning_addr_sk = x[10],wr_web_page_sk = x[11],wr_reason_sk = x[12],wr_order_number = x[13],wr_return_quantity = x[14],wr_return_amt = x[15],wr_return_tax = x[16],wr_return_amt_inc_tax = x[17],wr_fee = x[18],wr_return_ship_cost = x[19],wr_refunded_cash = x[20],wr_reversed_charge = x[21],wr_account_credit = x[22],wr_net_loss = x[23])).toDF()
# Persist as a parquet-backed Hive table, replacing any previous load.
textDataDF.write.saveAsTable("tpcds.web_returns", format="parquet", mode="overwrite")
| 2.53125 | 3 |
iroko/vocabularies/api.py | tocororo/iroko | 0 | 12759497 | # Copyright (c) 2021. Universidad de Pinar del Rio
# This file is part of SCEIBA (sceiba.cu).
# SCEIBA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
from typing import Dict
from flask_babelex import lazy_gettext as _
from flask_login import current_user
from invenio_access import Permission
from invenio_access.models import ActionUsers
from invenio_access.utils import get_identity
from invenio_accounts.models import User
from invenio_db import db
from sqlalchemy import exc as sqlalchemyExc
from iroko.sources.models import TermSources
from iroko.utils import string_as_identifier
from iroko.vocabularies.marshmallow import term_node_schema, term_schema, vocabulary_schema
from iroko.vocabularies.models import Term, TermClasification, Vocabulary
from iroko.vocabularies.permissions import ObjectVocabularyEditor, is_current_user_taxonomy_admin
# TODO: Revisar lanzamientos de excepciones
class Vocabularies:
    """Manage vocabularies and per-vocabulary editor permissions.

    Query/mutation helpers return a ``(msg, result)`` pair: ``msg`` is a
    human-readable status string; ``result`` is the affected object, or
    ``None``/``False`` on failure.
    """

    @classmethod
    def get_vocabularies(cls):
        """Return every Vocabulary row."""
        return Vocabulary.query.all()

    @classmethod
    def get_vocabulary(cls, name, id=None) -> Dict[str, Vocabulary]:
        """Fetch a vocabulary by id (preferred) or by its identifier.

        Fix: ``vocab`` was unbound (UnboundLocalError) when both ``id`` and
        ``name`` were None.
        """
        vocab = None
        if id is not None:
            vocab = Vocabulary.query.filter_by(id=id).first()
        elif name is not None:
            vocab = Vocabulary.query.filter_by(identifier=name).first()
        if vocab:
            return 'ok', vocab
        # NOTE: the message reports `id` even for lookups made by name.
        msg = 'Vocabulary not exist id={0}'.format(id)
        return msg, None

    @classmethod
    def edit_vocabulary(cls, name, data) -> Dict[str, Vocabulary]:
        """Update human_name/description/data of an existing vocabulary."""
        msg, vocab = cls.get_vocabulary(name)
        if not vocab:
            return msg, vocab
        try:
            valid_data = vocabulary_schema.load(data)
            vocab.human_name = valid_data['human_name']
            vocab.description = valid_data['description']
            vocab.data = valid_data['data']
            db.session.commit()
            msg = 'New Vocabulary UPDATED name={0}'.format(vocab.identifier)
        except Exception as err:
            msg = 'ERROR {0} - {1}'.format(err, data)
        return msg, vocab

    @classmethod
    def new_vocabulary(cls, input_data) -> Dict[str, Vocabulary]:
        """Create a vocabulary unless one with the same identifier exists."""
        msg = ''
        vocab = None
        try:
            data = vocabulary_schema.load(input_data)
            vocab = Vocabulary.query.filter_by(identifier=data['name']).first()
            if not vocab:
                vocab = Vocabulary()
                vocab.identifier = string_as_identifier(data['name'])
                vocab.human_name = data['human_name']
                vocab.description = data['description']
                vocab.data = data['data']
                db.session.add(vocab)
                db.session.commit()
                msg = 'New Vocabulary CREATED name={0}'.format(vocab.identifier)
            else:
                msg = 'Vocabulary already exist name={0}'.format(vocab.identifier)
                vocab = None
        except Exception as err:
            # Fix: the error message used `data`, which is unbound when schema
            # validation itself raises; report the raw input instead.
            msg = 'ERROR {0} - {1}'.format(err, input_data)
            vocab = None
        return msg, vocab

    @classmethod
    def grant_vocabulary_editor_permission(cls, user_id, vocabulary_id) -> Dict[str, bool]:
        """Grant the vocabulary-editor action on a vocabulary to a user."""
        done = False
        msg = ''
        try:
            vocabulary = Vocabulary.query.filter_by(identifier=vocabulary_id).first()
            user = User.query.filter_by(id=user_id).first()
            if not vocabulary:
                msg = 'Vocabulary not found'
            elif not user:
                msg = 'User not found'
            else:
                db.session.add(ActionUsers.allow(ObjectVocabularyEditor(vocabulary.id), user=user))
                db.session.commit()
                # NOTE(review): Vocabulary defines identifier/human_name in
                # new_vocabulary; `vocabulary.name` looks wrong -- confirm
                # against the model.
                msg = 'Vocabulary Editor Permission granted over {0}'.format(vocabulary.name)
                done = True
        except Exception as e:
            msg = str(e)
        return msg, done

    @classmethod
    def deny_vocabulary_editor_permission(cls, user_id, vocabulary_id) -> Dict[str, bool]:
        """Deny the vocabulary-editor action on a vocabulary for a user.

        Fix: the method was decorated @classmethod but missed the ``cls``
        parameter, so the class object was silently bound to ``user_id``.
        """
        done = False
        msg = ''
        try:
            vocabulary = Vocabulary.query.filter_by(identifier=vocabulary_id).first()
            user = User.query.filter_by(id=user_id).first()
            if not vocabulary:
                msg = 'Vocabulary not found'
            elif not user:
                msg = 'User not found'
            else:
                # NOTE(review): grant uses ObjectVocabularyEditor(vocabulary.id)
                # while deny uses vocabulary.name -- these must agree for a
                # deny to match a prior grant; confirm the intended key.
                db.session.add(ActionUsers.deny(ObjectVocabularyEditor(vocabulary.name), user=user))
                db.session.commit()
                # Fix: message previously said "granted" for a deny operation.
                msg = 'Editor Permission denied over {0}'.format(vocabulary.name)
                done = True
        except Exception as e:
            msg = str(e)
        return msg, done

    @classmethod
    def check_user_vocabulary_editor_permission(cls, user_id, vocabulary_id) -> Dict[str, bool]:
        """Return whether a user may edit the given vocabulary.

        Fixes: missing ``cls`` on a @classmethod, and the User lookup missed
        ``.first()`` so a Query object was passed to ``get_identity``.
        """
        done = False
        msg = ''
        try:
            if is_current_user_taxonomy_admin():
                done = True
            else:
                vocabulary = Vocabulary.query.filter_by(identifier=vocabulary_id).first()
                user = User.query.filter_by(id=user_id).first()
                user_identity = get_identity(user)
                permission = Permission(ObjectVocabularyEditor(vocabulary.name))
                done = permission.allows(user_identity)
        except Exception as e:
            msg = str(e)
        return msg, done
class Terms:
"""Manage Terms"""
@classmethod
def get_terms(cls):
    """Return every Term row."""
    return Term.query.all()
@classmethod
def get_terms_by_vocab(cls, vocabulary_id) -> Dict[str, Term]:
    """Return ('ok', terms) for the vocabulary's terms.

    Unlike the Vocabularies helpers, a missing vocabulary raises here
    instead of returning a (msg, None) pair.
    """
    msg, vocab = Vocabularies.get_vocabulary(vocabulary_id)
    if not vocab:
        raise Exception(msg)
    terms = vocab.terms
    return 'ok', terms
@classmethod
def get_first_level_terms_by_vocabulary(cls, vocabulary_id) -> Dict[str, Term]:
    """Return ('ok', vocab, root_terms) -- note: a 3-tuple, despite the
    Dict annotation; root terms are those with no parent."""
    msg, vocab = Vocabularies.get_vocabulary(vocabulary_id)
    if not vocab:
        raise Exception(msg)
    # Root terms only: parent_id is NULL.
    terms = vocab.terms.filter_by(parent_id=None).all()
    return 'ok', vocab, terms
@classmethod
def get_terms_tree_by_vocabulary(cls, vocabulary_id, level: int) -> [str, Vocabulary, list]:
    """Return ('ok', vocab, tree) for the vocabulary's term hierarchy.

    If level < 0, means all the levels of the tree.
    """
    msg, vocab = Vocabularies.get_vocabulary(vocabulary_id)
    if not vocab:
        raise Exception(msg)
    # Fix: get_first_level_terms_by_vocabulary returns a 3-tuple
    # (msg, vocab, terms); the old 2-name unpacking raised ValueError.
    msg, _, terms = Terms.get_first_level_terms_by_vocabulary(vocabulary_id)
    # Recursively dump each root term down to `level` levels (0 = current depth).
    terms_full = [term_node_schema.dump_term_node(term, level, 0) for term in terms]
    return 'ok', vocab, terms_full
@classmethod
def get_term(cls, uuid) -> Dict[str, Term]:
    """Look up a Term by uuid; returns ('ok', term) or (msg, None)."""
    found = Term.query.filter_by(uuid=uuid).first()
    if found is None:
        return 'Term not exist uuid={0}'.format(uuid), None
    return 'ok', found
@classmethod
def get_terms_by_uuid_list(cls, uuid_list):
    """Return all Terms whose uuid is in uuid_list."""
    matching = Term.query.filter(Term.uuid.in_(uuid_list)).all()
    return matching
@classmethod
def get_terms_by_id_list(cls, id_list):
    """Return all Terms whose primary key is in id_list."""
    matching = Term.query.filter(Term.id.in_(id_list)).all()
    return matching
@classmethod
def get_term_by_id(cls, id) -> Dict[str, Term]:
    """Look up a Term by primary key; returns ('ok', term) or (msg, None)."""
    found = Term.query.filter_by(id=id).first()
    if found is None:
        return 'Term not exist id={0}'.format(id), None
    return 'ok', found
# @classmethod
# def update_or_create_term(cls, input_data, term_id=None) -> Dict[str, Term]:
# """
# given a term data, try to update if id, uuid or name is present,
# otherwise create a new term.
# """
# data = term_schema.load(input_data)
# # print("****** LOADED term")
# term = None
# # if 'uuid' in data:
# # term = Term.query.filter_by(uuid=data['uuid']).first()
# # elif term is None and 'id' in data:
# # term = Term.query.filter_by(id=data['id']).first()
# if term_id:
# term = Term.query.filter_by(id=term_id).first()
# elif term is None and 'name' in data:
# term = Term.query.filter_by(identifier=data['name']).first()
# # print("********* term is {0}".format(term))
# if term is None and 'name' in data:
# # print('********IS NEW')
# return cls.new_term(data)
# if term and 'uuid' in data:
# # print('********IS UPDATE')
# return cls.edit_term(data['uuid'], data)
# return "error", None
@classmethod
def edit_term(cls, uuid, input_data) -> Dict[str, Term]:
msg = ''
try:
data = term_schema.load(input_data)
term = Term.query.filter_by(uuid=uuid).first()
term.vocabulary_id = data['vocabulary_id']
term.name = string_as_identifier(data['name'])
term.description = data['description']
term.parent_id = data['parent_id']
term.data = data['data']
# cls._update_term_data(term, data)
# print(term.data)
try:
db.session.commit()
cls._update_term_clasification(term, data)
msg = 'New Term UPDATED name={0}'.format(term.name)
return msg, term
except sqlalchemyExc.SQLAlchemyError as e:
msg = 'sqlalthemy: {0}'.format(e)
db.session.rollback()
return msg, None
except Exception as e:
msg = 'ERROR {0} - {1}'.format(e, input_data)
return msg, None
@classmethod
def new_term(cls, data) -> Dict[str, Term]:
msg = ''
# try:
valid_data = term_schema.load(data)
term = Term.query.filter_by(identifier=valid_data['name']).first()
if not term:
# print(valid_data)
term = Term()
term.vocabulary_id = valid_data['vocabulary_id']
term.identifier = string_as_identifier(valid_data['name'])
term.description = valid_data['description']
term.parent_id = valid_data['parent_id']
term.data = valid_data['data']
# print(term.data)
db.session.add(term)
# print(term)
try:
db.session.commit()
cls._update_term_clasification(term, valid_data)
msg = 'New Term CREATED name={0}'.format(term.identifier)
return msg, term
except sqlalchemyExc.SQLAlchemyError as e:
msg = 'sqlalthemy: {0}'.format(e)
db.session.rollback()
return msg, None
else:
msg = 'Term already exist name={0}'.format(valid_data['name'])
return msg, None
# except Exception as e:
# msg = 'ERROR {0} - {1}'.format(e, data)
# return msg, None
# @classmethod
# def _get_term_data(cls, term: Term, data):
# ''''''
# # return {
# # 'vocabulary_id': data['vocabulary_id'],
# # 'name': data['name'],
# # 'description': data['description'],
# # 'parent_id': data['parent_id'],
# # 'data': data['data'],
# # }
# # print(data)
# term.vocabulary_id = data['vocabulary_id']
# # print(data)
# term.name = data['name']
# # print(data)
# term.description = data['description']
# # print(data)
# term.parent_id = data['parent_id']
# # print(data)
# term.data = data['data']
# # print(data)
@classmethod
def _update_term_data(cls, term: Term, data):
''''''
# return {
# 'vocabulary_id': data['vocabulary_id'],
# 'name': data['name'],
# 'description': data['description'],
# 'parent_id': data['parent_id'],
# 'data': data['data'],
# }
# print(data)
term.vocabulary_id = data['vocabulary_id']
# print(data)
term.identifier = data['name']
# print(data)
term.description = data['description']
# print(data)
term.parent_id = data['parent_id']
# print(data)
term.data = data['data']
# print(data)
    @classmethod
    def _update_term_clasification(cls, term: Term, data):
        '''
        this search all clasification of the term, delete it, and then create new clasification
        based on params
        # TODO: This will be replaced by the graph database, when done....
        in data:
            class_ids: IDs of Terms that clasifies this term
            clasified_ids: IDs of Terms clasified by this term
        '''
        # print('_update_term_clasification', data)
        # delete all Clasifications in wich this term is envolved
        # (both directions: as classifier and as classified).
        TermClasification.query.filter_by(term_class_id=term.id).delete()
        TermClasification.query.filter_by(term_clasified_id=term.id).delete()
        # Commit the deletes before re-creating rows so stale pairs cannot survive.
        db.session.commit()
        # print('_update_term_clasification', data)
        # Terms clasified by this term
        for clasified_ids in data['clasified_ids']:
            # Silently skip ids that do not resolve to an existing Term.
            clasified = Term.query.filter_by(id=clasified_ids).first()
            if clasified:
                clasification = TermClasification()
                clasification.term_class_id = term.id
                clasification.term_clasified_id = clasified.id
                db.session.add(clasification)
        # Terms that clasifies this term
        for class_id in data['class_ids']:
            t_class = Term.query.filter_by(id=class_id).first()
            if t_class:
                clasification = TermClasification()
                clasification.term_class_id = t_class.id
                clasification.term_clasified_id = term.id
                db.session.add(clasification)
        db.session.commit()
        # print('_update_term_clasification', data)
@classmethod
def delete_term(cls, uuid) -> Dict[str, bool]:
try:
term = Term.query.filter_by(uuid=uuid).first()
if term:
if len(term.children) > 0:
return _(
'No se puede eliminar el término cuando otros términos dependen de él'
), False
in_clasification = TermClasification.query.filter_by(term_class_id=term.id).first()
if in_clasification:
return _(
'No se puede eliminar el término si clasificaciones dependen de él'
), False
in_source = TermSources.query.filter_by(term_id=term.id).first()
if in_source:
return _('No se puede eliminar el término si fuentes dependen de él'), False
db.session.query(TermClasification).filter_by(term_object_id=term.id).delete()
db.session.delete(term)
db.session.commit()
return 'Término: {0}, eliminado satisfactoriamente'.format(term.name), True
except Exception as e:
return str(e), False
@classmethod
def get_terms_by_vocabulary_name(cls, vocabulary_name):
try:
lista = Term.query.join(Term.vocabulary, aliased=True).filter_by(
name=vocabulary_name
).order_by(
Term.identifier
)
# print(lista[0].id)
return lista
except Exception as error:
return []
@classmethod
def get_term_tree_list(cls, term, result):
"""helper fuction to get all the children terms ids in a list
"""
result.append(term.id)
for child in term.children:
cls.get_term_tree_list(child, result)
    @classmethod
    def get_term_tree_list_by_level(cls, term, result, start_level=0, level=0):
        """
        Append to *result* the ids of the nodes in *term*'s subtree whose
        depth d (relative to *term*, which is depth 0) satisfies
        ``start_level <= d <= level``.
        """
        new_start = 0
        # A node is recorded only once start_level has been consumed.
        if start_level == 0:
            result.append(term.id)
        if start_level > 0:
            new_start = start_level - 1
        # Descend only while the depth budget (level) remains.
        if level > 0:
            for child in term.children:
                cls.get_term_tree_list_by_level(child, result, new_start, level - 1)
# @classmethod
# def dump_term(cls, term:Term, level_to_reach: int, current_level: int):
# """ helper function to load terms children"""
# if current_level < level_to_reach:
# children = []
# for child in term.children:
# children.append(Terms.dump_term(child, level_to_reach, current_level+1))
# return {'term': term_schema.dump(term), 'children':children}
# else:
# return term_schema.dump(term)
def get_current_user_permissions() -> Dict[str, Dict[str, list]]:
    """
    Checks from ActionUsers if current_user has vocabularies_full_editor_actions,
    that way it has full permissions over vocabularies and terms
    if not, then:
        checks if it has vocabulary_editor_actions,
        then collect the ids of the vocabularies it has permission on
    """
    if is_current_user_taxonomy_admin():
        return 'actions', {'vocabularies_full_editor_actions': None}
    granted = ActionUsers.query.filter_by(
        user=current_user,
        exclude=False,
        action='vocabulary_editor_actions'
    ).all()
    vocabularies_ids = [action.argument for action in granted]
    return 'actions', {'vocabulary_editor_actions': vocabularies_ids}
def get_current_user_described_permissions() -> Dict[str, Dict[str, list]]:
    """
    Checks from ActionUsers if current_user has vocabularies_full_editor_actions,
    that way it has full permissions over vocabularies and terms
    if not, then:
        checks if it has vocabulary_editor_actions,
        then collect the ids of the vocabularies it has permission on
        and gives dict of texts

    NOTE(review): this is currently byte-for-byte identical to
    get_current_user_permissions — the promised "dict of texts" is not
    implemented yet. TODO: confirm intent and either implement or remove.
    """
    vocabularies_ids = []
    if is_current_user_taxonomy_admin():
        return 'actions', {'vocabularies_full_editor_actions': None}
    else:
        actions = ActionUsers.query.filter_by(
            user=current_user,
            exclude=False,
            action='vocabulary_editor_actions'
        ).all()
        for action in actions:
            vocabularies_ids.append(action.argument)
        return 'actions', {'vocabulary_editor_actions': vocabularies_ids}
| 2.046875 | 2 |
tests/test_response.py | vyshakhbabji/ringcentral-python-sdk-with-slate-sample-code | 0 | 12759498 | <gh_stars>0
#!/usr/bin/env python
# encoding: utf-8
import unittest
from core.ajax.response import Response
class TestResponse(unittest.TestCase):
    """Tests for core.ajax.response.Response multipart parsing."""
    def setUp(self):
        # Raw HTTP 207 multi-status payload with a multipart/mixed body:
        # one batch "response" part followed by two fax message-store parts.
        self.raw = """
HTTP/1.1 207 Multi-Status
Content-Type: multipart/mixed; boundary=Boundary_20_29214173_1373546450505
--Boundary_20_29214173_1373546450505
Content-Type: application/json
{
"response" : [ {
"href" : ".../account/400129284008/extension/400129284008/message-store/401654758008",
"status" : 200,
"responseDescription" : "OK"
}, {
"href" : ".../account/400129284008/extension/400129284008/message-store/401642088008",
"status" : 200,
"responseDescription" : "OK"
} ]
}
--Boundary_20_29214173_1373546450505
Content-Type: application/json
{
"uri" : ".../account/400129284008/extension/400129284008/message-store/401654758008",
"id" : 401654758008,
"to" : [ {
"phoneNumber" : "18559100010"
} ],
"type" : "Fax",
"creationTime" : "2013-07-11T12:05:43.000Z",
"readStatus" : "Read",
"priority" : "Normal",
"attachments" : [ {
"id" : 1,
"uri" : ".../account/400129284008/extension/400129284008/message-store/401654758008/content/1",
"contentType" : "image/tiff"
} ],
"direction" : "Outbound",
"availability" : "Alive",
"messageStatus" : "SendingFailed",
"faxResolution" : "Low",
"faxPageCount" : 0,
"lastModifiedTime" : "2013-07-11T12:26:24.000Z"
}
--Boundary_20_29214173_1373546450505
Content-Type: application/json
{
"uri" : ".../account/400129284008/extension/400129284008/message-store/401642088008",
"id" : 401642088008,
"to" : [ {
"phoneNumber" : "77653287256446"
} ],
"type" : "Fax",
"creationTime" : "2013-07-11T08:45:57.000Z",
"readStatus" : "Read",
"priority" : "Normal",
"attachments" : [ {
"id" : 1,
"uri" : ".../account/400129284008/extension/400129284008/message-store/401642088008/content/1",
"contentType" : "image/tiff"
} ],
"direction" : "Outbound",
"availability" : "Alive",
"messageStatus" : "SendingFailed",
"faxResolution" : "Low",
"faxPageCount" : 0,
"lastModifiedTime" : "2013-07-11T12:26:52.000Z"
}
--Boundary_20_29214173_1373546450505--
"""
    def test_creation(self):
        """A 207 multipart payload should be recognized as multipart."""
        r = Response(200, self.raw)
        self.assertIsInstance(r, Response)
        self.assertTrue(r.is_multipart())
| 2.265625 | 2 |
nmr_sims/_version.py | foroozandehgroup/nmr_sims | 0 | 12759499 | # _version.py
# <NAME>
# <EMAIL>
# Last Edited: Tue 10 May 2022 10:24:50 BST
__version__ = "0.0.6"
| 1 | 1 |
unicode/servers/test_ucd.py | fluentpython/concurrency | 102 | 12759500 | import itertools
import ucd
ABC_LINES = '''
0040;COMMERCIAL AT;Po;0;ON;;;;;N;;;;;
0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
0042;LATIN CAPITAL LETTER B;Lu;0;L;;;;;N;;;;0062;
0043;LATIN CAPITAL LETTER C;Lu;0;L;;;;;N;;;;0063;
'''.strip()
def test_parse_line():
    """A plain UnicodeData line yields code point, name, old name and sorted words."""
    fields = ucd.parse_line('0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;')
    assert fields == (65, 'LATIN CAPITAL LETTER A', '',
                      ['A', 'CAPITAL', 'LATIN', 'LETTER'])
def test_parse_line_with_hyphen_and_field_10():
    """Hyphenated names are split into words; field 10 (Unicode 1.0 name)
    contributes extra words, deduplicated and sorted with the main name's."""
    cases = [
        ('002D;HYPHEN-MINUS;Pd;0;ES;;;;;N;;;;;',
         45, 'HYPHEN-MINUS', '', ['HYPHEN', 'MINUS']),
        ('005F;LOW LINE;Pc;0;ON;;;;;N;SPACING UNDERSCORE;;;;',
         95, 'LOW LINE', 'SPACING UNDERSCORE',
         ['LINE', 'LOW', 'SPACING', 'UNDERSCORE']),
        ('0027;APOSTROPHE;Po;0;ON;;;;;N;APOSTROPHE-QUOTE;;;',
         39, 'APOSTROPHE', 'APOSTROPHE-QUOTE', ['APOSTROPHE', 'QUOTE']),
    ]
    # Each case is (line, code, name, old_name, words).
    for line, *fields_ok in cases:
        fields = ucd.parse_line(line)
        assert fields == tuple(fields_ok)
def test_parser_top_3():
    """The parser's first three records come from the real UnicodeData file,
    starting at U+0020 SPACE."""
    records = list(itertools.islice(ucd.parser(), 3))
    assert records == [
        (32, 'SPACE', '', ['SPACE']),
        (33, 'EXCLAMATION MARK', '', ['EXCLAMATION', 'MARK']),
        (34, 'QUOTATION MARK', '', ['MARK', 'QUOTATION']),
    ]
def test_index():
    """Indexing a single record maps each of its words to its code point."""
    record = ucd.parse_line('003E;GREATER-THAN SIGN;Sm;0;ON;;;;;Y;;;;;')
    assert ucd.index([record]) == {'GREATER': [62], 'SIGN': [62], 'THAN': [62]}
def test_index_abc():
    """Indexing several records accumulates all code points sharing a word."""
    records = [ucd.parse_line(line) for line in ABC_LINES.split('\n')]
    idx = ucd.index(records)
    assert idx == {
        'A': [65],
        'AT': [64],
        'B': [66],
        'C': [67],
        'CAPITAL': [65, 66, 67],
        'COMMERCIAL': [64],
        'LATIN': [65, 66, 67],
        'LETTER': [65, 66, 67],
    }
| 3.109375 | 3 |
yaojikai/20180402/h1.py | python20180319howmework/homework | 0 | 12759501 | #1. 如果有两个字符串"hello" 和 “world”,生成一个列表,列表中元素["hw", "eo", "lr"]
str1 = "hello"
str2 = "world"
# Pair characters at equal indices of the two words; zip() walks both
# strings in lockstep, so no manual indexing is needed.
# (For "hello"/"world" this yields five pairs: hw, eo, lr, ll, od.)
l = [a + b for a, b in zip(str1, str2)]
print(l)
| 3.828125 | 4 |
tests/basic_tests/test_player.py | sriteja777/My_Mario | 1 | 12759502 | <filename>tests/basic_tests/test_player.py
# from path import mymario as m
import os
import sys
# Make the project package importable relative to this test file instead of
# hard-coding one developer's absolute checkout path
# (was: '/home/sriteja/PycharmProjects/My_Mario/mymario').
sys.path.append(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..', '..', 'mymario'))
# sys.path.insert(0, '../mymario/')
import motion as m
import config as c
import old_game as g
# Shared fixture used by every test below: a Player built from explicit bounds.
player = m.Player({'max_x': 4, 'max_y': 5, 'min_x': 6, 'min_y': 7}, c.PLAYER)
def test_init():
    """Constructor maps the bounds dict onto attributes and applies defaults."""
    assert (player.min_x, player.max_y, player.string, player.time) == (6, 5, c.PLAYER, c.DEFAULT_TIMEOUT)
def test_get_lives():
    """A fresh player starts with the configured default number of lives."""
    assert player.get_lives() == c.DEFAULT_LIVES
def test_update_live():
    """update_live(1) increments the live counter by exactly one."""
    before = player.get_lives()
    player.update_live(1)
    assert player.get_lives() == before + 1
| 2.75 | 3 |
sabnzbd/rename_dir.py | FreekKalter/linux-scripts | 0 | 12759503 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import re
import codecs
from path import Path
wordlist = []
def main():
    """CLI entry point: rename the single directory given on the command line."""
    parser = argparse.ArgumentParser(description='rename dir based on files inside')
    parser.add_argument('dirname', help='path of directory to rename')
    opts = parser.parse_args()
    rename(Path(opts.dirname))
def rename(p):
    """Rename directory *p* after the large media files it contains.

    Only files over 10 MiB are considered. The directory is renamed only if
    its current basename looks like gibberish (contains no dictionary words);
    the new name is the single file's stem, or the longest common stem when
    there are several files. The final absolute path is printed either way.
    """
    # Candidate files: anything bigger than 10 MiB anywhere under p.
    files = [f for f in p.walkfiles() if os.stat(f).st_size > 10 * 1024 * 1024]
    dirname = ''
    if is_gibberish(p.basename()):
        if len(files) == 1:
            dirname = files[0].basename().stripext()
        else:
            dirname = long_substr([f.namebase for f in files])
    # Strip trailing release noise ("scene"/"cd"), turn dots/underscores
    # into spaces and drop a dangling trailing dash.
    pattern = re.compile('(scene|cd)$', re.IGNORECASE)
    dirname = pattern.sub('', dirname.strip())
    dirname = re.sub('[._]', ' ', dirname)
    dirname = re.sub('- ?$', '', dirname).strip()
    if dirname != '':
        # print(p.basename() + ' -> ' + dirname)
        p = p.rename(Path.joinpath(p.dirname(), dirname))
    print(p.abspath())
def is_gibberish(dir):
    """Return True when no whitespace-separated token of *dir* is a known
    dictionary word (module-level ``wordlist`` holds upper-cased words)."""
    return not any(token.upper() in wordlist for token in dir.split(' '))
def main_main():
    """Batch mode: rename directories under the download root.

    NOTE(review): the slice [0:1] limits the run to the first directory only —
    presumably left over from testing; confirm before widening.
    """
    dirs = Path('/media/truecrypt4/down/').dirs()
    for d in dirs[0:1]:
        rename(Path(d))
def long_substr(data):
    """Return the longest common substring of all strings in *data*.

    Empty string when *data* has fewer than two entries or the first entry
    is empty. Ties resolve to the leftmost occurrence in data[0].
    """
    best = ''
    if len(data) < 2 or not data[0]:
        return best
    first = data[0]
    for start in range(len(first)):
        # Only candidates longer than the current best can improve it; once a
        # length fails at this start, longer ones would too.
        for length in range(len(best) + 1, len(first) - start + 1):
            candidate = first[start:start + length]
            if all(candidate in s for s in data):
                best = candidate
            else:
                break
    return best
if __name__ == '__main__':
    with codecs.open('/usr/share/dict/american-english-large', mode='r', encoding='utf-8') as wl:
        # Use a set: is_gibberish tests membership per token, and a list made
        # every lookup O(len(dictionary)).
        wordlist = {word.strip().upper() for word in wl}
    # print('done loading wordlist')
    main()
| 2.96875 | 3 |
clickhouse_driver/dbapi/cursor.py | 1024inc/clickhouse-driver | 823 | 12759504 | from collections import namedtuple
from itertools import islice
from ..errors import Error as DriverError
from .errors import InterfaceError, OperationalError, ProgrammingError
# PEP 249 7-item column descriptor used to build cursor.description rows.
Column = namedtuple(
    'Column',
    'name type_code display_size internal_size precision scale null_ok'
)
class Cursor(object):
    """PEP 249-style cursor over a clickhouse-driver Client.

    A small state machine (see ``States``) guards call order: queries must be
    executed before fetching, and a closed cursor rejects further operations.
    """
    class States(object):
        # Cursor lifecycle; transitions via _begin_query/_end_query/close().
        (
            NONE,
            RUNNING,
            FINISHED,
            CURSOR_CLOSED
        ) = range(4)
    _states = States()
    def __init__(self, client, connection):
        # Low-level driver client that performs the actual network I/O.
        self._client = client
        # Owning DB-API connection; the cursor deregisters itself on close().
        self._connection = connection
        self._reset_state()
        # Default row count for fetchmany() when size is omitted (PEP 249).
        self.arraysize = 1
        # Begin non-PEP attributes
        self._columns_with_types = None
        # End non-PEP attributes
        super(Cursor, self).__init__()
    def __repr__(self):
        is_closed = self._state == self._states.CURSOR_CLOSED
        return '<cursor object at 0x{0:x}; closed: {1:}>'.format(
            id(self), is_closed
        )
    # Iteration support.
    def __iter__(self):
        # Yield rows one at a time until the result set is exhausted.
        while True:
            one = self.fetchone()
            if one is None:
                return
            yield one
    # Context manager integrations.
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    @property
    def description(self):
        """PEP 249 7-item column descriptions for the last result set,
        or None before any query was executed."""
        if self._state == self._states.NONE:
            return None
        columns = self._columns or []
        types = self._types or []
        return [
            Column(name, type_code, None, None, None, None, True)
            for name, type_code in zip(columns, types)
        ]
    @property
    def rowcount(self):
        """
        :return: the number of rows that the last .execute*() produced.
        """
        return self._rowcount
    def close(self):
        """
        Close the cursor now. The cursor will be unusable from this point
        forward; an :data:`~clickhouse_driver.dbapi.Error` (or subclass)
        exception will be raised if any operation is attempted with the
        cursor.
        """
        self._client.disconnect()
        self._state = self._states.CURSOR_CLOSED
        try:
            # cursor can be already closed
            self._connection.cursors.remove(self)
        except ValueError:
            pass
    def execute(self, operation, parameters=None):
        """
        Prepare and execute a database operation (query or command).
        :param operation: query or command to execute.
        :param parameters: sequence or mapping that will be bound to
                           variables in the operation.
        :return: None
        """
        self._check_cursor_closed()
        self._begin_query()
        try:
            execute, execute_kwargs = self._prepare()
            response = execute(
                operation, params=parameters, with_column_types=True,
                **execute_kwargs
            )
        except DriverError as orig:
            # Wrap driver-level failures in the DB-API exception hierarchy.
            raise OperationalError(orig)
        self._process_response(response)
        self._end_query()
    def executemany(self, operation, seq_of_parameters):
        """
        Prepare a database operation (query or command) and then execute it
        against all parameter sequences found in the sequence
        `seq_of_parameters`.
        :param operation: query or command to execute.
        :param seq_of_parameters: sequences or mappings for execution.
        :return: None
        """
        self._check_cursor_closed()
        self._begin_query()
        try:
            execute, execute_kwargs = self._prepare()
            response = execute(
                operation, params=seq_of_parameters, **execute_kwargs
            )
        except DriverError as orig:
            raise OperationalError(orig)
        self._process_response(response, executemany=True)
        self._end_query()
    def fetchone(self):
        """
        Fetch the next row of a query result set, returning a single sequence,
        or None when no more data is available.
        :return: the next row of a query result set or None.
        """
        self._check_query_started()
        if self._stream_results:
            return next(self._rows, None)
        else:
            if not self._rows:
                return None
            return self._rows.pop(0)
    def fetchmany(self, size=None):
        """
        Fetch the next set of rows of a query result, returning a sequence of
        sequences (e.g. a list of tuples). An empty sequence is returned when
        no more rows are available.
        :param size: amount of rows to return.
        :return: list of fetched rows or empty list.
        """
        self._check_query_started()
        if size is None:
            size = self.arraysize
        if self._stream_results:
            # Streaming: -1 drains the iterator, otherwise take a slice.
            if size == -1:
                return list(self._rows)
            else:
                return list(islice(self._rows, size))
        if size < 0:
            rv = self._rows
            self._rows = []
        else:
            rv = self._rows[:size]
            self._rows = self._rows[size:]
        return rv
    def fetchall(self):
        """
        Fetch all (remaining) rows of a query result, returning them as a
        sequence of sequences (e.g. a list of tuples).
        :return: list of fetched rows.
        """
        self._check_query_started()
        if self._stream_results:
            return list(self._rows)
        rv = self._rows
        self._rows = []
        return rv
    def setinputsizes(self, sizes):
        # Do nothing: not needed by this driver, kept for PEP 249 conformance.
        pass
    def setoutputsize(self, size, column=None):
        # Do nothing: not needed by this driver, kept for PEP 249 conformance.
        pass
    # Begin non-PEP methods
    @property
    def columns_with_types(self):
        """
        :return: list of column names with corresponding types of the last
                 .execute*(). E.g. [('x', 'UInt64')].
        """
        return self._columns_with_types
    def set_stream_results(self, stream_results, max_row_buffer):
        """
        Toggles results streaming from server. Driver will consume
        block-by-block of `max_row_buffer` size and yield row-by-row from each
        block.
        :param stream_results: enable or disable results streaming.
        :param max_row_buffer: specifies the maximum number of rows to buffer
               at a time.
        :return: None
        """
        self._stream_results = stream_results
        self._max_row_buffer = max_row_buffer
    def set_settings(self, settings):
        """
        Specifies settings for cursor.
        :param settings: dictionary of query settings
        :return: None
        """
        self._settings = settings
    def set_types_check(self, types_check):
        """
        Toggles type checking for sequence of INSERT parameters.
        Disabled by default.
        :param types_check: new types check value.
        :return: None
        """
        self._types_check = types_check
    def set_external_table(self, name, structure, data):
        """
        Adds external table to cursor context.
        If the same table is specified more than once the last one is used.
        :param name: name of external table
        :param structure: list of tuples (name, type) that defines table
                          structure. Example [(x, 'Int32')].
        :param data: sequence of rows of tuples or dicts for transmission.
        :return: None
        """
        self._external_tables[name] = (structure, data)
    def set_query_id(self, query_id):
        """
        Specifies the query identifier for cursor.
        :param query_id: the query identifier.
        :return: None
        """
        self._query_id = query_id
    # End non-PEP methods
    # Private methods.
    def _prepare(self):
        """Build the execute callable and its kwargs for the next query,
        honoring streaming mode and accumulated cursor options."""
        external_tables = [
            {'name': name, 'structure': structure, 'data': data}
            for name, (structure, data) in self._external_tables.items()
        ] or None
        execute = self._client.execute
        if self._stream_results:
            execute = self._client.execute_iter
            self._settings = self._settings or {}
            # Block size doubles as the row buffer when streaming.
            self._settings['max_block_size'] = self._max_row_buffer
        execute_kwargs = {
            'settings': self._settings,
            'external_tables': external_tables,
            'types_check': self._types_check,
            'query_id': self._query_id
        }
        return execute, execute_kwargs
    def _process_response(self, response, executemany=False):
        """Normalize driver output into _columns/_types/_rows/_rowcount."""
        if executemany:
            # executemany returns only an affected-row count.
            self._rowcount = response
            response = None
        if not response or isinstance(response, int):
            self._columns = self._types = self._rows = []
            if isinstance(response, int):
                self._rowcount = response
            return
        if self._stream_results:
            # First item of the iterator is the column header.
            columns_with_types = next(response)
            rows = response
        else:
            rows, columns_with_types = response
        self._columns_with_types = columns_with_types
        # Only SELECT queries have columns_with_types.
        # DDL and INSERT INTO ... SELECT queries have empty columns header.
        # We need to obtain rows count only during non-streaming SELECTs.
        if columns_with_types:
            self._columns, self._types = zip(*columns_with_types)
            if not self._stream_results:
                self._rowcount = len(rows)
        else:
            self._columns = self._types = []
        self._rows = rows
    def _reset_state(self):
        """
        Resets query state and get ready for another query.
        """
        self._state = self._states.NONE
        self._columns = None
        self._types = None
        self._rows = None
        self._rowcount = -1
        self._stream_results = False
        self._max_row_buffer = 0
        self._settings = None
        self._query_id = None
        self._external_tables = {}
        self._types_check = False
    def _begin_query(self):
        # Mark the cursor busy while the server processes the statement.
        self._state = self._states.RUNNING
    def _end_query(self):
        # Results are ready; fetch methods may now be called.
        self._state = self._states.FINISHED
    def _check_cursor_closed(self):
        if self._state == self._states.CURSOR_CLOSED:
            raise InterfaceError('cursor already closed')
    def _check_query_started(self):
        if self._state == self._states.NONE:
            raise ProgrammingError('no results to fetch')
| 2.734375 | 3 |
event_rsvp/cms_plugins.py | django-cms-plugins/django-event-rsvp | 5 | 12759505 | """CMS Plugins for the ``event_rsvp`` app."""
from django.utils.timezone import now
from django.utils.translation import ugettext as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import Event
class CMSEventPlugin(CMSPluginBase):
    """django-cms plugin rendering the next three published upcoming events."""
    name = _('Upcoming Events')
    render_template = 'event_rsvp/upcoming_events.html'
    def render(self, context, instance, placeholder):
        """Inject the three soonest future, published events into the context."""
        context.update({
            'events': Event.objects.filter(start__gt=now(),
                                           is_published=True)[:3],
            'placeholder': placeholder,
        })
        return context
plugin_pool.register_plugin(CMSEventPlugin)
| 1.78125 | 2 |
code/tasks/NAV/agents/executor.py | khanhptnk/iliad | 7 | 12759506 | import os
import sys
import json
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
import models
import worlds
class Executor(object):
    """Instruction-following navigation agent.

    Encodes a natural-language instruction, then decodes one navigation
    action per timestep against world states until STOP or timeout.
    """
    # Action index reserved for the "stop" action.
    STOP = 0
    def __init__(self, config):
        self.config = config
        self.device = config.device
        self.vocab = config.vocab
        # Simulator/world wrapper that produces navigation states.
        self.world = worlds.load(config)
        model_config = config.executor.model
        model_config.device = config.device
        model_config.vocab_size = len(self.vocab)
        model_config.loc_embed_size = config.world.loc_embed_size
        model_config.max_instruction_length = config.executor.max_instruction_length
        model_config.pad_idx = self.vocab['<PAD>']
        self.model = models.load(model_config).to(self.device)
        logging.info('model: ' + str(self.model))
        self.optim = torch.optim.Adam(
            self.model.parameters(), lr=model_config.learning_rate)
        # Optionally resume from a checkpoint.
        if hasattr(model_config, 'load_from'):
            self.load(model_config.load_from)
        # ignore_index=-1 masks out timesteps of already-terminated episodes.
        self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
        self.max_instruction_length = config.executor.max_instruction_length
    def _to_tensor(self, x):
        # Build a tensor from a Python structure on the configured device.
        return torch.tensor(x).to(self.device)
    def _to_tensor_from_numpy(self, x):
        # Wrap a numpy array without copying, then move to the device.
        return torch.from_numpy(x).to(self.device)
    def _index_and_pad(self, xs, vocab, reverse=True):
        """Tokenize, truncate, append <EOS>, optionally reverse, and pad.

        Returns (encodings, masks); masks are True on padding positions.
        """
        encodings = []
        masks = []
        for x in xs:
            # Truncate and terminate every instruction with <EOS>.
            x = x[:self.max_instruction_length] + ['<EOS>']
            encodings.append([vocab[w] for w in x])
            if reverse:
                # NOTE(review): tokens are fed reversed — presumably what the
                # encoder expects; confirm against the model definition.
                encodings[-1] = list(reversed(encodings[-1]))
            masks.append([0] * len(encodings[-1]))
        # Padding
        max_len = max([len(encoding) for encoding in encodings])
        for i, encoding in enumerate(encodings):
            encoding.extend([vocab['<PAD>']] * (max_len - len(encoding)))
        for mask in masks:
            mask.extend([1] * (max_len - len(mask)))
        encodings = self._to_tensor(encodings).long()
        masks = self._to_tensor(masks).bool()
        return encodings, masks
    def _nav_action_variable(self, states):
        """Stack per-state action embeddings into one padded batch.

        Returns (action_embeds, invalid); ``invalid`` marks padded slots
        beyond each state's real navigable actions.
        """
        max_num_a = max(len(state.adj_loc_list) for state in states)
        invalid = np.zeros((self.batch_size, max_num_a), np.uint8)
        action_embed_size = states[0].action_embeddings.shape[-1]
        action_embeds = np.zeros(
            (self.batch_size, max_num_a, action_embed_size), dtype=np.float32)
        for i, state in enumerate(states):
            num_a = len(state.adj_loc_list)
            invalid[i, num_a:] = 1
            action_embeds[i, :num_a, :] = state.action_embeddings
        action_embeds = self._to_tensor_from_numpy(action_embeds).float()
        invalid = self._to_tensor_from_numpy(invalid).bool()
        return action_embeds, invalid
    def init(self, init_poses, instructions, is_eval):
        """Reset per-episode buffers, encode instructions, return init states."""
        if is_eval:
            self.model.eval()
        else:
            self.model.train()
        self.is_eval = is_eval
        self.batch_size = len(instructions)
        # Rolling per-episode logs consumed by compute_loss()/get_action_seqs().
        self.state_seqs = []
        self.pred_action_seqs = [[] for _ in range(self.batch_size)]
        self.teacher_action_seqs = []
        self.action_logit_seqs = []
        self.logit_mask_seqs = []
        self.terminated = [False] * self.batch_size
        instr_encodings, instr_masks = self._index_and_pad(
            instructions, self.vocab)
        self.text_dec_h, self.state_dec_h, self.dec_time, self.instructions = \
            self.model.encode(instr_encodings, instr_masks)
        self.instruction_masks = instr_masks
        self.prev_action_embeds = self.model.init_action(self.batch_size)
        # Hard cap on decoding steps per episode.
        self.timer = self.config.executor.max_timesteps
        init_states = self.world.init(init_poses)
        return init_states
    def act(self, states, teacher_actions=None, bc=False):
        """One decoding step: score actions, choose them (greedy when
        evaluating, sampled or teacher-forced when training), update
        termination flags, and return the chosen action indices."""
        curr_view_features = [state.curr_view_features for state in states]
        curr_view_features = self._to_tensor_from_numpy(
            np.stack(curr_view_features))
        all_action_embeds, logit_masks = self._nav_action_variable(states)
        self.text_dec_h, self.state_dec_h, self.dec_time, action_logits = \
            self.model.decode(
                self.text_dec_h,
                self.state_dec_h,
                self.dec_time,
                self.prev_action_embeds,
                all_action_embeds,
                self.instructions,
                self.instruction_masks,
                curr_view_features,
                logit_masks
            )
        self.action_logit_seqs.append(action_logits)
        self.logit_mask_seqs.append(logit_masks)
        self.state_seqs.append(states)
        if self.is_eval:
            # Greedy decoding at evaluation time.
            pred_actions = action_logits.max(dim=1)[1].tolist()
            self.prev_actions = pred_actions
            for i in range(self.batch_size):
                if not self.terminated[i]:
                    self.pred_action_seqs[i].append(pred_actions[i])
        else:
            if bc:
                # Behavior cloning: follow the teacher exactly.
                pred_actions = teacher_actions
            else:
                pred_actions = D.Categorical(logits=action_logits).sample().tolist()
            self.prev_actions = pred_actions
            teacher_actions = self._to_tensor(teacher_actions).long()
            for i in range(self.batch_size):
                # -1 matches ignore_index of the loss for finished episodes.
                if self.terminated[i]:
                    teacher_actions[i] = -1
            self.teacher_action_seqs.append(teacher_actions)
        self.timer -= 1
        for i in range(self.batch_size):
            self.terminated[i] |= self.timer <= 0
            self.terminated[i] |= self.prev_actions[i] == self.STOP
        self.prev_action_embeds = all_action_embeds[np.arange(self.batch_size), pred_actions, :].detach()
        return self.prev_actions
    def has_terminated(self):
        # True once every episode in the batch has stopped or timed out.
        return all(self.terminated)
    def get_action_seqs(self):
        # Greedy action sequences collected during evaluation.
        return self.pred_action_seqs
    def predict(self, init_poses, instructions):
        """Greedy rollout: return (viewpoint paths, pose trajectories)."""
        with torch.no_grad():
            states = self.init(init_poses, instructions, True)
            paths = [[state.viewpoint] for state in states]
            poses = [[pose] for pose in init_poses]
            while not self.has_terminated():
                pred_actions = self.act(states)
                states = states.step(pred_actions)
                for i, state in enumerate(states):
                    pose = (state.scan, state.viewpoint, state.heading, state.elevation)
                    if not self.terminated[i]:
                        poses[i].append(pose)
                        # Record viewpoint only when the agent actually moved.
                        if state.viewpoint != paths[i][-1]:
                            paths[i].append(states[i].viewpoint)
        return paths, poses
    def compute_loss(self):
        """Sum of per-timestep cross-entropy against the teacher actions."""
        assert len(self.teacher_action_seqs) == len(self.action_logit_seqs)
        loss = 0
        zipped_info = zip(self.action_logit_seqs, self.teacher_action_seqs)
        for logits, refs in zipped_info:
            loss += self.loss_fn(logits, refs)
        return loss
    def learn(self):
        """One optimizer step; returns the mean per-timestep loss value."""
        loss = self.compute_loss()
        self.optim.zero_grad()
        loss.backward()
        self.optim.step()
        return loss.item() / len(self.teacher_action_seqs)
    def save(self, name, trajectories=None):
        """Save model + optimizer state as <experiment_dir>/<name>.ckpt.

        NOTE(review): *trajectories* is currently unused — confirm intent.
        """
        file_path = os.path.join(self.config.experiment_dir, name + '.ckpt')
        ckpt = { 'model_state_dict': self.model.state_dict(),
                 'optim_state_dict': self.optim.state_dict() }
        torch.save(ckpt, file_path)
        logging.info('Saved %s model to %s' % (name, file_path))
    def load(self, file_path):
        """Restore model + optimizer state from a checkpoint file."""
        ckpt = torch.load(file_path, map_location=self.device)
        self.model.load_state_dict(ckpt['model_state_dict'])
        self.optim.load_state_dict(ckpt['optim_state_dict'])
        logging.info('Loaded model from %s' % file_path)
| 2.1875 | 2 |
feacher/__init__.py | qpochlabs/feacher | 0 | 12759507 | <reponame>qpochlabs/feacher
from feacher.feacher import extract
| 0.996094 | 1 |
finrl/trade.py | haitt00/FinRL-Library | 0 | 12759508 | import torch
from elegantrl.agent import AgentPPO
from elegantrl.run import Arguments
from finrl.neo_finrl.data_processor import DataProcessor
from ray.rllib.agents.ppo import PPOTrainer, ppo
from stable_baselines3 import PPO
def trade(
    start_date,
    end_date,
    ticker_list,
    data_source,
    time_interval,
    technical_indicator_list,
    drl_lib,
    env,
    agent,
    mode="backtesting",
    if_vix=True,
    **kwargs
):
    """Backtest a trained DRL agent on freshly downloaded market data.

    Downloads and preprocesses price data, builds a non-training instance of
    ``env``, loads a saved agent from ``kwargs['cwd']`` and replays it over
    the data, returning the list of cumulative episode returns
    (total_asset / initial_total_asset per step).

    Args:
        start_date / end_date: trading window passed to the data source.
        ticker_list: symbols to download.
        data_source: name understood by ``DataProcessor`` (e.g. "yahoofinance").
        time_interval: bar interval (e.g. "1D").
        technical_indicator_list: indicator names to append as features.
        drl_lib: one of "elegantrl", "rllib", "stable_baselines3".
        env: environment *class*; instantiated with ``config=env_config``.
        agent: algorithm name; only "ppo" is supported for elegantrl.
        mode: "backtesting" (implemented) or "paper_trading" (stub).
        if_vix: whether to append the VIX column before array conversion.
        **kwargs: forwarded to ``DataProcessor``; also read for
            ``net_dimension`` (default 2**7) and ``cwd`` (checkpoint path).

    Returns:
        list of per-step cumulative returns, or raises ValueError for
        unsupported inputs.
    """
    if mode == "backtesting":
        DP = DataProcessor(data_source, **kwargs)
        data = DP.download_data(ticker_list, start_date, end_date, time_interval)
        data = DP.clean_data(data)
        data = DP.add_technical_indicator(data, technical_indicator_list)
        if if_vix:
            data = DP.add_vix(data)
        price_array, tech_array, risk_array = DP.df_to_array(data, if_vix)
        # if_train=False disables training-time randomization in the env.
        env_config = {
            "price_array": price_array,
            "tech_array": tech_array,
            "risk_array": risk_array,
            "if_train": False,
        }
        env_instance = env(config=env_config)

        net_dimension = kwargs.get("net_dimension", 2 ** 7)
        cwd = kwargs.get("cwd", "./" + str(agent))

        # test on elegantrl
        if drl_lib == "elegantrl":
            # select agent
            if agent == "ppo":
                args = Arguments(if_on_policy=True)
                args.agent = AgentPPO()
                args.env = env_instance
                args.agent.if_use_cri_target = True
            else:
                raise ValueError(
                    "Invalid agent input or the agent input is not \
                    supported yet."
                )

            # load agent
            try:
                state_dim = env_instance.state_dim
                action_dim = env_instance.action_dim

                agent = args.agent
                net_dim = net_dimension

                agent.init(net_dim, state_dim, action_dim)
                agent.save_or_load_agent(cwd=cwd, if_save=False)
                act = agent.act
                device = agent.device
            except BaseException:
                raise ValueError("Fail to load agent!")

            # test on the testing env
            _torch = torch
            state = env_instance.reset()
            episode_returns = list()  # the cumulative_return / initial_account
            with _torch.no_grad():
                for i in range(env_instance.max_step):
                    s_tensor = _torch.as_tensor((state,), device=device)
                    a_tensor = act(s_tensor)  # action_tanh = act.forward()
                    action = (
                        a_tensor.detach().cpu().numpy()[0]
                    )  # not need detach(), because with torch.no_grad() outside
                    state, reward, done, _ = env_instance.step(action)

                    total_asset = (
                        env_instance.amount
                        + (
                            env_instance.price_ary[env_instance.day]
                            * env_instance.stocks
                        ).sum()
                    )
                    episode_return = total_asset / env_instance.initial_total_asset
                    episode_returns.append(episode_return)
                    if done:
                        break
            print("Test Finished!")
            # return episode returns on testing data
            return episode_returns

        # test using rllib
        elif drl_lib == "rllib":
            # load agent
            config = ppo.DEFAULT_CONFIG.copy()
            config["env"] = env
            config["log_level"] = "WARN"
            config["env_config"] = {
                "price_array": price_array,
                "tech_array": tech_array,
                "risk_array": risk_array,
                "if_train": False,
            }
            trainer = PPOTrainer(env=env, config=config)
            try:
                trainer.restore(cwd)
                print("Restoring from checkpoint path", cwd)
            except BaseException:
                raise ValueError("Fail to load agent!")

            # test on the testing env
            state = env_instance.reset()
            episode_returns = list()  # the cumulative_return / initial_account
            done = False
            while not done:
                action = trainer.compute_single_action(state)
                state, reward, done, _ = env_instance.step(action)

                total_asset = (
                    env_instance.amount
                    + (
                        env_instance.price_ary[env_instance.day] * env_instance.stocks
                    ).sum()
                )
                episode_return = total_asset / env_instance.initial_total_asset
                episode_returns.append(episode_return)
                print("episode return: " + str(episode_return))
            print("Test Finished!")
            return episode_returns

        # test using stable baselines3
        elif drl_lib == "stable_baselines3":
            try:
                # load agent
                model = PPO.load(cwd)
                print("Successfully load model", cwd)
            except BaseException:
                raise ValueError("Fail to load agent!")

            # test on the testing env
            state = env_instance.reset()
            episode_returns = list()  # the cumulative_return / initial_account
            done = False
            while not done:
                # predict() returns (action, hidden_state); only the action is used.
                action = model.predict(state)[0]
                state, reward, done, _ = env_instance.step(action)

                total_asset = (
                    env_instance.amount
                    + (
                        env_instance.price_ary[env_instance.day] * env_instance.stocks
                    ).sum()
                )
                episode_return = total_asset / env_instance.initial_total_asset
                episode_returns.append(episode_return)
                print("episode_return", episode_return)
            print("Test Finished!")
            return episode_returns
        else:
            raise ValueError("DRL library input is NOT supported yet. Please check.")

    elif mode == "paper_trading":
        print("Paper trading is NOT supported for now.")
    else:
        raise ValueError(
            "Invalid mode input! Please input either 'backtesting' or 'paper_trading'."
        )
if __name__ == "__main__":
    # Demo entry point: backtests a saved PPO agent with each supported
    # DRL backend over the FAANG ticker set.
    # fetch data
    from finrl.neo_finrl.neofinrl_config import FAANG_TICKER
    from finrl.neo_finrl.neofinrl_config import TECHNICAL_INDICATORS_LIST
    from finrl.neo_finrl.neofinrl_config import TRADE_START_DATE
    from finrl.neo_finrl.neofinrl_config import TRADE_END_DATE

    # construct environment
    from finrl.neo_finrl.env_stock_trading.env_stock_trading import StockTradingEnv

    env = StockTradingEnv

    # demo for elegantrl
    trade(
        start_date=TRADE_START_DATE,
        end_date=TRADE_END_DATE,
        ticker_list=FAANG_TICKER,
        data_source="yahoofinance",
        time_interval="1D",
        technical_indicator_list=TECHNICAL_INDICATORS_LIST,
        drl_lib="elegantrl",
        env=env,
        agent="ppo",
        cwd="./test_ppo",
        net_dimension=2 ** 9,
    )

    # demo for rllib
    trade(
        start_date=TRADE_START_DATE,
        end_date=TRADE_END_DATE,
        ticker_list=FAANG_TICKER,
        data_source="yahoofinance",
        time_interval="1D",
        technical_indicator_list=TECHNICAL_INDICATORS_LIST,
        drl_lib="rllib",
        env=env,
        agent="ppo",
        cwd="./test_ppo",
        net_dimension=2 ** 9,
    )

    # demo for stable-baselines3
    trade(
        start_date=TRADE_START_DATE,
        end_date=TRADE_END_DATE,
        ticker_list=FAANG_TICKER,
        data_source="yahoofinance",
        time_interval="1D",
        technical_indicator_list=TECHNICAL_INDICATORS_LIST,
        # Fixed: was "stable_baseline3" (missing "s"), which does not match
        # any branch in trade() and raised "DRL library input is NOT
        # supported yet." instead of running the stable-baselines3 demo.
        drl_lib="stable_baselines3",
        env=env,
        agent="ppo",
        cwd="./test_ppo",
        net_dimension=2 ** 9,
    )
| 2.140625 | 2 |
hacktheback/rest/forms/views/form.py | hackthevalley/hack-the-back | 0 | 12759509 | from django.utils import timezone
from django.utils.translation import ugettext as _
from drf_spectacular.utils import (
OpenApiResponse,
extend_schema,
extend_schema_view,
)
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from hacktheback.forms.models import Form
from hacktheback.rest.exceptions import ConflictError
from hacktheback.rest.forms.openapi import id_or_type_parameter
from hacktheback.rest.forms.serializers import FormSerializer
from hacktheback.rest.pagination import StandardResultsPagination
from hacktheback.rest.permissions import AdminSiteModelPermissions
class IdOrTypeLookupMixin:
    """View mixin whose detail routes accept either a primary key or the
    literal string ``hacker_application`` as the ``id_or_type`` URL kwarg."""

    lookup_field = None
    lookup_url_kwarg = "id_or_type"

    def get_object(self):
        """Resolve and permission-check the object named by ``id_or_type``."""
        queryset = self.filter_queryset(self.get_queryset())

        # The URL conf must supply the expected keyword argument.
        assert self.lookup_url_kwarg in self.kwargs, (
            "Expected view %s to be called with a URL keyword argument "
            'named "%s". Fix your URL conf.'
            % (self.__class__.__name__, self.lookup_url_kwarg)
        )

        key = self.kwargs[self.lookup_url_kwarg]
        lookup = (
            {"type": Form.FormType.HACKER_APPLICATION}
            if key == "hacker_application"
            else {"pk": key}
        )
        obj = get_object_or_404(queryset, **lookup)

        # May raise a permission denied.
        self.check_object_permissions(self.request, obj)
        return obj
# NOTE: the method docstrings below are rendered by drf-spectacular as the
# public API descriptions, so they are left untouched; commentary is added
# as plain comments only.
@extend_schema(tags=["Hacker APIs", "Forms"])
class FormsViewSet(
    mixins.RetrieveModelMixin,
    mixins.ListModelMixin,
    viewsets.GenericViewSet,
):
    # Read-only, unauthenticated endpoints exposing only published forms.
    queryset = Form.objects.filter(
        is_draft=False,
    )
    authentication_classes = ()
    serializer_class = FormSerializer

    @extend_schema(summary="List Forms")
    def list(self, request, *args, **kwargs):
        """
        List all forms that have been published between their `start_at` and
        `end_at` times.
        """
        return super().list(request, *args, **kwargs)

    @extend_schema(summary="Retrieve a Form")
    def retrieve(self, request, *args, **kwargs):
        """
        Retrieve a form that has been published between its `start_at` and
        `end_at` times.
        """
        return super().retrieve(request, *args, **kwargs)

    def get_hacker_application_form(self):
        # Helper used by the hacker_application action to look up the single
        # form of type HACKER_APPLICATION within the published queryset.
        queryset = self.filter_queryset(self.get_queryset())
        return get_object_or_404(
            queryset, type=Form.FormType.HACKER_APPLICATION
        )

    @extend_schema(summary="Retrieve the Hacker Application Form")
    @action(detail=False)
    def hacker_application(self, request, *args, **kwargs):
        """
        Retrieve the hacker application form that has been published between
        its `start_at` and `end_at` times.
        """
        # Swap the object getter so the generic retrieve() flow resolves the
        # hacker application form instead of a pk-based lookup.
        self.get_object = self.get_hacker_application_form
        return self.retrieve(request, *args, **kwargs)
@extend_schema(tags=["Admin APIs", "Forms"])
@extend_schema_view(
    list=extend_schema(summary="List Forms", description="List all forms."),
    retrieve=extend_schema(
        summary="Retrieve a Form",
        description="Retrieve a form.",
        parameters=[id_or_type_parameter()],
    ),
    create=extend_schema(
        summary="Create a Form", description="Create a form."
    ),
    update=extend_schema(
        summary="Update a Form",
        description="Update a form.",
        parameters=[id_or_type_parameter()],
    ),
    partial_update=extend_schema(
        summary="Partial Update a Form",
        description="Partial update a form.",
        parameters=[id_or_type_parameter()],
    ),
    destroy=extend_schema(
        summary="Delete a Form",
        description="Delete a form.",
        parameters=[id_or_type_parameter()],
    ),
    publish=extend_schema(
        summary="Publish a Form",
        description="Publish a form. This sets `is_draft` to `False`.",
        parameters=[id_or_type_parameter()],
        request=None,
        responses={
            "204": OpenApiResponse(description="Form published successfully."),
        },
    ),
    unpublish=extend_schema(
        summary="Unpublish a Form",
        description="Unpublish a form. This sets `is_draft` to `True`.",
        parameters=[id_or_type_parameter()],
        request=None,
        responses={
            "204": OpenApiResponse(
                description="Form unpublished successfully."
            ),
        },
    ),
)
class FormsAdminViewSet(IdOrTypeLookupMixin, viewsets.ModelViewSet):
    """Admin CRUD endpoints for forms, plus publish/unpublish actions."""

    queryset = Form.objects.all()
    serializer_class = FormSerializer
    pagination_class = StandardResultsPagination
    permission_classes = (AdminSiteModelPermissions,)

    def perform_create(self, serializer):
        # There may be at most one hacker application form. Previously this
        # raised ConflictError for *every* attempt to create such a form,
        # without checking whether one actually exists, making it impossible
        # to ever create the first one. Also read from validated_data (the
        # canonical post-validation source) rather than serializer.data.
        type_to_create = serializer.validated_data.get("type")
        if (
            type_to_create == Form.FormType.HACKER_APPLICATION
            and Form.objects.filter(
                type=Form.FormType.HACKER_APPLICATION
            ).exists()
        ):
            raise ConflictError(
                detail=_("A hacker application form already exists.")
            )
        serializer.save()

    @action(detail=True, methods=["POST"])
    def publish(self, request, id_or_type=None):
        # Mark the form as no longer a draft, making it publicly visible.
        form = self.get_object()
        form.is_draft = False
        form.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @action(detail=True, methods=["POST"])
    def unpublish(self, request, id_or_type=None):
        # Revert the form to draft state, hiding it from the public API.
        form = self.get_object()
        form.is_draft = True
        form.save()
        return Response(status=status.HTTP_204_NO_CONTENT)
| 1.992188 | 2 |
pc_software/controlbox.py | misan/PushDevice | 1 | 12759510 | import matplotlib.pyplot as plt
# import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import PySimpleGUI as sg
import matplotlib
import serial as ser
from serial import SerialException
from time import sleep
import glob
import os
# from pathlib import Path
from threading import Thread
from configparser import ConfigParser
import sys
import signal
import time
import datetime
def command(l):
    # Send one command line (CRLF-terminated) to the Arduino and wait for a
    # reply via receive().
    # NOTE(review): receive() compares line.strip() -- a str -- against its
    # `end` argument, so receive(1) can never match and would loop forever;
    # this helper appears unused. Confirm the intended sentinel (likely '.').
    print('SEND: '+l)
    s.write(l.encode('utf-8'))
    s.write(b'\r\n')
    return receive(1)
def send(l):
    # Write a raw command to the serial port with no line terminator
    # (the terminator write is deliberately commented out below).
    s.write(l.encode('utf-8'))
    #s.write(b'\r\n')
def receive(end): ## end will signal when to die
    # Worker-thread routine: append serial lines to the global transcript
    # `recibido` until a line equal to the sentinel `end` arrives, then clear
    # the global `busy` flag and dump the measurements to the global `file`
    # path. Decimal points are swapped for commas, presumably for
    # locale-style spreadsheet import -- confirm with downstream tooling.
    global busy, recibido
    while True:
        line = s.readline().decode()
        recibido += line
        if line.strip() == end:
            busy = False
            with open(file,'w') as f:
                for i in recibido.splitlines():
                    if len(i)>1:
                        f.write(i.replace('.',',')+'\n')
                f.close()  # NOTE(review): redundant inside `with`, harmless.
            return
def update(): # send elon & force values to Arduino
    # Push the current max elongation (L...) and max force (F...) settings to
    # the device, echoing each acknowledgement line into the GUI transcript.
    global recibido, MaximumElongation, MaxPushForce
    send('L'+str(MaximumElongation))
    line = s.readline().decode()
    recibido += line
    window['box'].update(recibido)
    send('F'+str(MaxPushForce))
    line = s.readline().decode()
    recibido += line
    window['box'].update(recibido)
# ---- Main script: load settings, build the GUI, run the event loop. ----
config = ConfigParser()
found = config.read('defaults.ini')
if len(found):
    # Read config parameter from INI file
    print("INI file: "+str(found[0]))
    port = config.get('SerialPort','COM')
    ComSpeed = config.getint('SerialPort','BaudRate')
    CellScale = config.getfloat('General','CellScale')
    MaxPushForce = config.getfloat('General','MaxPushForce')
    MaximumElongation = config.getfloat('General','MaximumElongation')
    DataDir = config.get('General','DataDir')

# GUI layout: connection row, settings row, test-control rows, transcript.
layout=[[sg.Text("Serial Port to Arduino:"), sg.Input(port, size=(25, 1), enable_events=True, key="Port"), sg.Button('Connect'),sg.Button('Disconnect')],
        [sg.Text('MaxDisplacement (mm)'), sg.Input(MaximumElongation,size=(5,1),key="Elon"), sg.Text('MaxForce (N)'),sg.Input(MaxPushForce,size=(5,1),key="Force"), sg.Button('Set') ],
        [sg.Button('Start'), sg.Button('ResetCell'),sg.Button('ManualMeasurement'), sg.Button('STOP',button_color=(None,'red'))],
        [sg.Button('StartManualTest'), sg.Text('motor disabled')],
        [sg.Multiline('Last measures',size=(40,10),key='box', autoscroll=True,)]]

window = sg.Window('Push Device Control',layout, finalize=True)
window['Disconnect'].update(disabled=True)

recibido='Last measurements\n'
connected = False
busy = False  # set True while a receive() worker thread is collecting data

while True:
    # Block on GUI events normally; poll with a timeout while a background
    # capture is running so the transcript keeps refreshing.
    if not busy:
        windows, event, values = sg.read_all_windows()
    else:
        windows, event, values = sg.read_all_windows(timeout=200)
        window['box'].update(recibido) #values['box']+"Line")
    if not connected and event == 'Connect': #################CONNECT!!!!!!!
        connected = True
        window['Disconnect'].update(disabled=False)
        window['Connect'].update(disabled=True)
        try:
            port = values['Port']
            s = ser.Serial(port, baudrate=ComSpeed, timeout=2)
        except SerialException:
            print("ERROR Opening the Serial Port: "+values['Port'])
            event='Exit'
            s.close()
            break
        #sleep(1)
        # Handshake: expect a 'ready' line from the Arduino within 3 reads.
        ok = False
        for i in range(3):
            line=s.readline().strip()
            # print(line)
            if line == b'ready':
                ok = True
                break
        if not ok:
            print('NOT CONNECTED')
            event='Exit'
            s.close()
            break
        recibido = 'CONNECTED\n'
        window['box'].update(recibido)
        update()
    if connected and event == 'Disconnect': #######DISCONNECT
        connected = False
        window['Connect'].update(disabled=False)
        window['Disconnect'].update(disabled=True)
        s.close()
    if event == sg.WIN_CLOSED or event == 'Exit': break
    if connected and event == 'STOP': send('X')
    if connected and event == 'Start':
        # Automated test: ask for a target file, then collect lines in a
        # background thread until the '.' sentinel arrives.
        file = sg.popup_get_file('Filename to store test data:', save_as = True)
        recibido = ''
        busy = True
        send('S')
        thread = Thread(target=receive, args=('.'))
        thread.start()
    if connected and event == 'ManualMeasurement':
        # One-shot reading: query ('?') and echo the single reply line.
        send('?')
        line = s.readline().decode()
        recibido += line
        window['box'].update(recibido)
    if event == 'Set':
        MaximumElongation = values['Elon']
        MaxPushForce = values['Force']
        if connected: # if connected then push the values to the Arduino
            update()
        # Persist the new settings for the next session.
        config['General']['MaxPushForce'] = MaxPushForce
        config['General']['MaximumElongation'] = MaximumElongation
        config['SerialPort']['COM'] = values['Port']
        with open('defaults.ini', 'w') as configfile:
            config.write(configfile)
    if connected and event == 'StartManualTest':
        file = sg.popup_get_file('Filename to store test data:', save_as = True)
        recibido = ''
        busy = True
        send('M')
        thread = Thread(target=receive, args=('.'))
        thread.start()

window.close()
s.close()
ListView_II_Vistas_basadas_en_clases/core/erp/tests.py | BrianMarquez3/Python-Django | 2 | 12759511 | <reponame>BrianMarquez3/Python-Django
from config.wsgi import *
from core.erp.models import *
# Seed the Category table with the standard food groups, reporting each
# newly created primary key.
category_names = ['Leche y derivados', 'Carnes, pescados y huevos', 'Patatas, legumbres, frutos secos',
                  'Verduras y Hortalizas', 'Frutas', 'Cereales y derivados, azúcar y dulces',
                  'Grasas, aceite y mantequilla']

for name in category_names:
    record = Category(name=name)
    record.save()
    print('Guardado registro N°{}'.format(record.id))
| 2.359375 | 2 |
web1.py | kpharish/ISRS | 0 | 12759512 | import requests
from bs4 import BeautifulSoup
# Scrape the IMDb Top-250 chart and print the anchor tag (title + link)
# for each listed movie.
res = requests.get('https://www.imdb.com/chart/top/')
html = res.text
soup = BeautifulSoup(html, 'html.parser')

# One <tr> per movie; the title lives in the 'titleColumn' cell.
tbody = soup.find('tbody')
trs = tbody.findAll('tr')
for tr in trs:
    td = tr.find('td', {'class': 'titleColumn'})
    print(td.a)
    # The block below (kept for reference) would follow each movie link and
    # extract its runtime and genres from the detail page.
    # movieId = td.a['href']
    # movieUrl = f'https://www.imdb.com/{movieId}'
    # res2 = requests.get(movieUrl)
    # html = res2.text
    # soup2 = BeautifulSoup(html, 'html.parser')
    # info = soup2.find('div', {'class': 'subtext'})
    # a = info.findAll('a')
    # print(td.a.string)
    # print(info.time.string.strip())
    # print(a[0].string.strip())
    # print(a[1].string.strip())
onmt/modules/Reward.py | KTH1234/deep_summ | 0 | 12759513 | # -*- coding: utf-8 -*-
import json
import onmt
import onmt.io
import torch
import torch.cuda
from torch.autograd import Variable
from rouge import Rouge
class Reward():
    """Sequence-level reward computation for self-critical training.

    Supported reward types (chosen by the ``reward`` constructor argument):
    ROUGE-L F1 against the gold target, or an entailment score between a
    premise/hypothesis pair produced by an external AllenNLP-style scorer.
    Scores are returned per batch element as CUDA tensors.
    """

    def __init__(self, reward):
        # reward: reward-type string, e.g. "rouge" or one of the
        # "entailment_*" variants handled in get_batch_reward().
        self.reward = reward
        print("Reward : {}".format(reward))
        if reward == "rouge":
            self.rouge = Rouge(metrics=["rouge-l"], stats=["f"])
        elif "entailment" in reward:
            # entail_model_path = "0617_entail_400k_model.tar.gz" # kor entail index:1
            entail_model_path = "allennlp_eng_model.tar.gz" # eng entail index:0, cotra, neu:1,2
            self.entailment = onmt.modules.Entailment_scorer(entail_model_path, 0) # model path and index of entailment
        # Small negative epsilon added to rewards in criterion().
        self.eps = -1e-5
        # Counter for out-of-range vocab indices seen while detokenizing.
        self.error = 0

    def get_rouge_reward(self, batch, sample_indices, max_indices, copy):
        """ROUGE-L F1 rewards for sampled vs greedy decodes.

        sample_indices / max_indices are (tgt_len x batch) index tensors.
        Returns (max - sample) score deltas, both score vectors, and the
        copy-attention alignment matrix for the sampled tokens.
        """
        tgt_vocab = batch.dataset.fields['tgt'].vocab
        global_vocab = batch.dataset.fields['src'].vocab
        sample_scores = []
        max_scores = []
        alignments = [] # for efficiency, calculate alignments here
        for i in range(len(batch)):
            in_batch_index = batch.indices.data[i]
            # Per-example extended vocab when the copy mechanism is on.
            src_vocab = batch.dataset.src_vocabs[in_batch_index] if copy else global_vocab
            # Gold target tokens (skip the leading BOS row of batch.tgt).
            raw_tokens = self.build_target_tokens(src_vocab, tgt_vocab, batch.tgt.data[1:,i])
            sample_tokens = self.build_target_tokens(src_vocab, tgt_vocab, sample_indices[:,i])
            max_tokens = self.build_target_tokens(src_vocab, tgt_vocab, max_indices[:,i])
            sample_rouge_f1_s = self.calculate_rouge(sample_tokens, raw_tokens)
            max_rouge_f1_s = self.calculate_rouge(max_tokens, raw_tokens)
            # calculate alignments (leading 0 accounts for the BOS position)
            mask = [0] + [src_vocab.stoi[w] for w in sample_tokens]
            alignments.append(mask)
            sample_scores.append(sample_rouge_f1_s['rouge-l']['f'])
            max_scores.append(max_rouge_f1_s['rouge-l']['f'])
            # Occasionally log a full example for qualitative inspection.
            if torch.rand(1)[0] <= 0.005:
                src_tokens = self.build_target_tokens(src_vocab, batch.dataset.fields['src'].vocab, batch.src[0].data[:,i])
                print("in batch index = {}".format(in_batch_index))
                print("\t src tokes")
                print("\t\t", src_tokens)
                print("\t target tokens")
                print("\t\t", raw_tokens)
                print("\tsampled tokens")
                print("\t\t", sample_scores[-1], sample_tokens)
                print("\t max tokens")
                print("\t\t", max_scores[-1], max_tokens)
        # Zero-pad all alignment rows to a common length before stacking.
        max_sample_len = max(len(x) for x in alignments)
        max_sample_len = max(sample_indices.size(0)+1, max_sample_len)
        for i in range(len(alignments)):
            alignments[i] += [0] * max(0, max_sample_len - len(alignments[i]))
            alignments[i] = torch.LongTensor(alignments[i]).cuda()
        sample_alignments = torch.stack(alignments).transpose(0,1)
        sample_scores = torch.Tensor(sample_scores).cuda()
        max_scores = torch.Tensor(max_scores).cuda()
        # Self-critical baseline: greedy score minus sampled score.
        batch_scores = max_scores - sample_scores
        return batch_scores, sample_scores, max_scores, sample_alignments

    def get_entailment_reward(self, batch, sample_indices, max_indices, entail_type):
        """Entailment-based rewards; the premise/hypothesis pairing depends
        on ``entail_type`` (src-vs-sample, src-vs-gold, or tgt-vs-sample)."""
        tgt_vocab = batch.dataset.fields['tgt'].vocab
        src_vocab = batch.dataset.fields['src'].vocab
        sample_scores = []
        max_scores = []
        alignments = [] # for efficiency, calculate alignments here
        for i in range(len(batch)):
            in_batch_index = batch.indices.data[i]
            raw_src_tokens = self.build_src_tokens(src_vocab, batch.src[0].data[:,i])
            sample_tokens = self.build_target_tokens(src_vocab, tgt_vocab, sample_indices[:,i])
            max_tokens = self.build_target_tokens(src_vocab, tgt_vocab, max_indices[:,i])
            # Per-example extended vocab is still needed for the alignments.
            instance_src_vocab = batch.dataset.src_vocabs[in_batch_index]
            raw_tokens = self.build_target_tokens(instance_src_vocab, tgt_vocab, batch.tgt.data[1:,i])
            # Choose the premise side according to the reward variant.
            if entail_type == "entailment_src_hyp_sample":
                hyp_tokens = raw_src_tokens
            elif entail_type == "entailment_src_hyp_gold":
                hyp_tokens = raw_src_tokens
                max_tokens = raw_tokens
            elif entail_type == "entailment_tgt_hyp":
                hyp_tokens = raw_tokens
            else:
                input("Parameter Error!")
            sample_entail_s = self.get_entailment_score(hyp_tokens, sample_tokens, True)
            max_entail_s = self.get_entailment_score(hyp_tokens, max_tokens, True) # use gold target to baseline
            mask = [0] + [instance_src_vocab.stoi[w] for w in sample_tokens]
            alignments.append(mask)
            sample_scores.append(sample_entail_s)
            max_scores.append(max_entail_s)
            # Occasionally log a full example for qualitative inspection.
            if torch.rand(1)[0] <= 0.005:
                src_tokens = raw_src_tokens
                print("in batch index = {}".format(in_batch_index))
                print("\t src tokes")
                print("\t\t", src_tokens)
                print("\t target tokens")
                print("\t\t", raw_tokens)
                print("\tsampled tokens")
                print("\t\t", sample_scores[-1], sample_tokens)
                print("\t max tokens")
                print("\t\t", max_scores[-1], max_tokens)
        # Zero-pad all alignment rows to a common length before stacking.
        max_sample_len = max(len(x) for x in alignments)
        max_sample_len = max(sample_indices.size(0)+1, max_sample_len)
        for i in range(len(alignments)):
            alignments[i] += [0] * max(0, max_sample_len - len(alignments[i]))
            alignments[i] = torch.LongTensor(alignments[i]).cuda()
        sample_alignments = torch.stack(alignments).transpose(0,1)
        sample_scores = torch.Tensor(sample_scores).cuda()
        max_scores = torch.Tensor(max_scores).cuda()
        batch_scores = max_scores - sample_scores
        return batch_scores, sample_scores, max_scores, sample_alignments

    def get_batch_reward(self, batch, sample_indices, max_indices, copy=None):
        # Dispatch to the reward implementation configured at construction.
        if self.reward == "rouge":
            assert copy is not None
            return self.get_rouge_reward(batch, sample_indices, max_indices, copy)
        elif "entailment" in self.reward:
            return self.get_entailment_reward(batch, sample_indices, max_indices, self.reward)

    def get_entailment_score(self, src_tokens, sample_tokens, length_penalty=False):
        """Score sample_tokens as a hypothesis entailed by src_tokens,
        optionally scaled by the hypothesis/premise length ratio."""
        premise = " ".join(src_tokens)
        hypothesis = " ".join(sample_tokens)
        json_data = {"premise":premise, "hypothesis":hypothesis}
        score = self.entailment.predict_entailment(json_data)
        if length_penalty:
            penalty = len(sample_tokens) / len(src_tokens)
            score = penalty * score
        return score

    def calculate_rouge(self, hyp, ref):
        # hyp/ref are token lists; Rouge expects whitespace-joined strings.
        hyp = " ".join(hyp)
        ref = " ".join(ref)
        score = self.rouge.get_scores(hyp, ref)
        return score[0]

    def build_src_tokens(self, src_vocab, indices):
        # Map source-side index tensor back to token strings, counting
        # (and tolerating) any out-of-range indices as '<unk>'.
        tokens = []
        for tok in indices:
            try:
                tokens.append(src_vocab.itos[tok])
            except IndexError:
                self.error += 1
                print("Reward line 82: Error index occured {}".format(self.error))
                tokens.append('<unk>')
        return tokens

    def build_target_tokens(self, src_vocab, tgt_vocab, pred):
        # Detokenize a prediction: indices below len(tgt_vocab) come from the
        # target vocab, higher ones are copy-mechanism pointers into the
        # per-example source vocab. Stops (dropping EOS) at the first EOS.
        tokens = []
        for tok in pred:
            try:
                if tok < len(tgt_vocab):
                    tokens.append(tgt_vocab.itos[tok])
                else:
                    tokens.append(src_vocab.itos[tok - len(tgt_vocab)])
                if tokens[-1] == onmt.io.EOS_WORD:
                    tokens = tokens[:-1]
                    break
            except IndexError:
                self.error += 1
                print("Reward line 82: Error index occured {}".format(self.error))
                tokens.append('<unk>')
        return tokens

    def criterion(self, input, seq, reward):
        """Masked policy-gradient-style loss over log-prob ``input``.

        NOTE(review): as written, ``output = - input`` ignores both the
        reward and the mask in the product (the rewarded form is the
        commented-out line below); the divisor 7 is an unexplained magic
        constant. Confirm which variant is intended before relying on this.
        """
        reward = reward.expand_as(input) + self.eps
        print("reward line 76 reward", reward)
        def to_contiguous(tensor):
            # view(-1) requires contiguous storage.
            if tensor.is_contiguous():
                return tensor
            else:
                return tensor.contiguous()
        input = to_contiguous(input).view(-1)
        reward = to_contiguous(reward).view(-1)
        # Mask: 1 for real tokens (shifted right by one so position 0 counts).
        mask = (seq>0).float()
        mask = to_contiguous(torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1)).view(-1)
        print("reward line 89 input req", input.requires_grad)
        # output = - input * reward * Variable(mask)
        output = - input
        output = torch.sum(output) / torch.sum(mask) / 7
        # output = torch.sum(output)
        return output
| 2.28125 | 2 |
var/spack/repos/builtin/packages/nccmp/package.py | jeanbez/spack | 0 | 12759514 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Nccmp(CMakePackage):
    """Compare NetCDF Files"""

    homepage = "http://nccmp.sourceforge.net/"
    url = "https://gitlab.com/remikz/nccmp/-/archive/1.9.0.1/nccmp-1.9.0.1.tar.gz"

    # sha256 checksums of the upstream release tarballs; do not edit by hand.
    version('1.9.0.1', sha256='81e9753cf451afe8248d44c841e102349e07cde942b11d1f91b5f85feb622b99')
    version('1.8.9.0', sha256='da5d2b4dcd52aec96e7d96ba4d0e97efebbd40fe9e640535e5ee3d5cd082ae50')
    version('1.8.2.0', sha256='7f5dad4e8670568a71f79d2bcebb08d95b875506d3d5faefafe1a8b3afa14f18')

    # nccmp links against the NetCDF C library.
    depends_on('netcdf-c')
| 1.25 | 1 |
checkout/views.py | shawnvogt/DjangoReactNextjs-Training-django-ambassador | 0 | 12759515 | <filename>checkout/views.py<gh_stars>0
from django.db import transaction
from checkout.serializers import LinkSerializer
from core.models import Link, Order, Product, OrderItem
from rest_framework.views import APIView
from rest_framework import exceptions
from rest_framework.response import Response
import decimal
import stripe
from django.conf import settings
from django.core.mail import send_mail
class LinkAPIView(APIView):
    """Public endpoint returning an ambassador link looked up by its code."""

    def get(self, _, code=''):
        # first() yields None when no link matches; the serializer then
        # renders an empty payload rather than raising.
        match = Link.objects.filter(code=code).first()
        return Response(LinkSerializer(match).data)
class OrderAPIView(APIView):
    """Create an order from an ambassador link and open a Stripe checkout.

    The whole request runs inside one atomic transaction: on any failure
    while building the order or contacting Stripe, every row written so far
    is rolled back.
    """

    @transaction.atomic
    def post(self, request):
        data = request.data
        link = Link.objects.filter(code=data['code']).first()

        if not link:
            raise exceptions.APIException('Invalid code!')

        try:
            order = Order()
            order.code = link.code
            order.user_id = link.user.id
            order.ambassador_email = link.user.email
            order.first_name = data['first_name']
            order.last_name = data['last_name']
            order.email = data['email']
            order.address = data['address']
            order.country = data['country']
            order.city = data['city']
            order.zip = data['zip']
            order.save()

            line_items = []

            for item in data['products']:
                product = Product.objects.filter(pk=item['product_id']).first()
                quantity = decimal.Decimal(item['quantity'])

                # Revenue split: 10% to the ambassador, 90% to the admin.
                order_item = OrderItem()
                order_item.order = order
                order_item.product_title = product.title
                order_item.price = product.price
                order_item.quantity = quantity
                order_item.ambassador_revenue = decimal.Decimal(0.1) * product.price * quantity
                order_item.admin_revenue = decimal.Decimal(0.9) * product.price * quantity
                order_item.save()

                line_items.append({
                    'name': product.title,
                    'description': product.description,
                    'images': [product.image],
                    # Stripe expects the amount in the smallest currency unit.
                    'amount': int(product.price * 100),
                    'currency': 'CAD',
                    # NOTE(review): Stripe expects an integer quantity; this
                    # passes a Decimal -- confirm whether int(quantity) is
                    # intended here.
                    'quantity': quantity
                })

            # SECURITY: hard-coded API key; should come from settings
            # (the intended line is kept commented below).
            # stripe.api_key = settings.STRIPE_SECRET_KEY
            stripe.api_key = '<KEY>'

            source = stripe.checkout.Session.create(
                success_url='http://localhost:5000/success?source={CHECKOUT_SESSION_ID}',
                cancel_url='http://localhost:5000/error',
                payment_method_types=['card'],
                line_items=line_items
            )

            order.transaction_id = source['id']
            order.save()

            return Response(source)
        except Exception as e:
            # Fixed: transaction.rollback() is forbidden (raises
            # TransactionManagementError) while the @transaction.atomic
            # block is active. Mark the atomic block for rollback instead,
            # so the partial order/items are discarded when we return.
            transaction.set_rollback(True)
            return Response({'message': 'Error occurred'})
class OrderConfirmAPIView(APIView):
    """Mark a checkout session's order as complete and notify by email."""

    def post(self, request):
        # The Stripe session id was stored as transaction_id at creation time.
        order = Order.objects.filter(transaction_id=request.data['source']).first()

        if not order:
            raise exceptions.APIException('Order not found!')

        order.complete = True
        order.save()

        # Notify the site admin of the completed order.
        send_mail(
            subject='An order has been completed',
            message='Order #' + str(order.id) + ' with a total of $' + str(order.admin_revenue) + ' has been completed',
            from_email='<EMAIL>',
            recipient_list=['<EMAIL>']
        )

        # Notify the ambassador of their earned commission.
        send_mail(
            subject='An order has been completed',
            message='You earned $' + str(order.ambassador_revenue) + ' from the link #' + order.code + '!',
            from_email='<EMAIL>',
            recipient_list=[order.ambassador_email]
        )

        return Response({'message': 'success'})
albionmarket_backend/resources/orders_resources.py | akku123california/albionmarket-backend | 0 | 12759516 | <reponame>akku123california/albionmarket-backend
from flask_restful import Resource
from sqlalchemy import not_
from .orders_stats import fetch_item_market_stats
from ..models import Item
class OrdersResourcesV1(Resource):
    """Market statistics for every 'resources' item (base tiers only)."""

    def get(self):
        # Exclude enchanted variants (ids containing "_LEVEL") and keep only
        # items in the resources category.
        query = (Item.query
                 .filter(not_(Item.id.like('%_LEVEL%')))
                 .filter_by(category_id='resources'))

        results = [
            {
                'stats': fetch_item_market_stats(entry.id),
                'item': {
                    'id': entry.id,
                    'name': entry.name,
                    'category_id': entry.category_id,
                    'category_name': entry.category.name,
                    'sub_category_id': entry.sub_category_id,
                    'sub_category_name': entry.sub_category.name,
                    'tier': entry.tier,
                },
            }
            for entry in query
        ]

        return {'resources': results}, 200
| 2.515625 | 3 |
swig_post_process.py | LaudateCorpus1/firebase-unity-sdk | 38 | 12759517 | <reponame>LaudateCorpus1/firebase-unity-sdk
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert enums from C naming convention to C# naming convention.
We have multiple cases where we need to run post-process fixes to SWIG generated
output, so this provides a single place to base whole file edits in-place, using
Python. Each process can be defined in it's own class so that it can keep
internal state, which might be useful for doing multiple passes.
"""
import os
import re
from absl import app
from absl import flags
from absl import logging
# Command-line configuration. --csharp_dir is required (enforced in __main__);
# the boolean flags toggle individual post-processing passes on or off.
FLAGS = flags.FLAGS
flags.DEFINE_string('csharp_dir', None,
                    'Path with generated csharp files to be scrubbed.')
flags.DEFINE_string('cpp_dir', None,
                    'Path with generated C++ files to fix up.')
flags.DEFINE_string('dll_import', None,
                    'Library name to use in DllImport statements. '
                    'If this is unspecified DllImport statements are not '
                    'modified.')
flags.DEFINE_string('module_name_prefix', None,
                    'Prefix to add to generated C methods.')
flags.DEFINE_string('exception_module_name', None,
                    'Name of the module to check for pending exceptions.')
flags.DEFINE_list('exception_ignore_path_prefixes', [],
                  'List of file path prefixes to ignore when replacing the '
                  'name of the module that checks for pending exceptions.')
flags.DEFINE_boolean('fix_sealed_classes', True,
                     'Remove the virtual modifier from sealed class methods '
                     'and make protected members private.')
flags.DEFINE_boolean('rename_async_methods', True,
                     'Rename all methods that return Task to *Async().')
flags.DEFINE_boolean('snake_to_camel_case_args', True,
                     'Rename method arguents from snake_case to camelCase.')
flags.DEFINE_boolean('internal_visibility', True,
                     'Change visibility to internal for all methods and '
                     'properties that end with "Internal".')
class SWIGPostProcessingInterface(object):
  """An interface for objects that scrub code generated from SWIG.

  Subclasses override __call__ and may keep state between passes; this base
  implementation is an identity transform.
  """
  def __call__(
      self, file_str, filename, iteration): # pylint: disable=unused-argument
    """Modify the contents of a file as a string and return the result.
    This is executed on each file and then repeated until a pass over all the
    files has no effect for this process.
    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: This is the full path of the file being processed.
      iteration: Pass number (0-based) for how many times this process has run
       on this file.  The process runs until no changes are made.
    Returns:
      Returns the modified file_str.
    """
    return file_str
class SWIGEnumPostProcessing(SWIGPostProcessingInterface):
  """A class to scrub swig generated code and convert enums from C++ to C#.

  Strips the C-style 'k<EnumName>' prefix from enum values (e.g.
  kInitResultSuccess -> Success) and records each rename in enum_mapping so
  references elsewhere can be fixed up later.
  """
  def __init__(self):
    """Initializes this instance."""
    # Maps the original C enum value name to its renamed C# value.
    self.enum_mapping = {}
  def _replace_enum_lines(self, enum_lines, enum_name):
    """Internal. Processes lines inside the enum, and saves the mapping."""
    def _cache_replace(match):
      """Convert enum lines, and save mappings from old to new enum names."""
      self.enum_mapping[match.group('old')] = match.group('new')
      return match.group('space') + match.group('new') + match.group('end')
    # ^\s* avoids anything inside comments by restricting to the first non space
    # text on the line.
    # Note: This will still break if using block style comments and the first
    # word starts with 'k'. In practice all of our comments are led by ///.
    # The named group "old" matches at least a k, and then optionally the enum
    # identifier after. For example kInitResultSuccess, will match kInitResult
    # if InitResult is the enum_name.
    # The named group "new" matches any symbol with letters and numbers, and
    # underscores.
    # The named group "end" matches anything left after that.
    return re.sub((r'^(?P<space>\s*)(?P<old>k(?:%s)?(?P<new>[a-zA-Z_0-9]*))'
                   r'(?P<end>.*)') %
                  enum_name, _cache_replace, enum_lines, flags=re.MULTILINE)
  def __call__(self, file_str, filename, iteration):
    """Processes each file for enum definitions, and processes them.
    Enums definitions are scraped from the file strings passed in and are
    modified in place, stripping the k<enum_name> prefix. Each renamed enum is
    also be added to a dictionary mapping on the class, which is used for
    doing reference fixups.
    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.
    Returns:
      Returns the modified file_str.
    """
    # Replacement function which captures the inside of the enum for extraction.
    def repl(m):
      ret = ''.join([m.group('pre'),
                     m.group('enum_name'),
                     m.group('start'),
                     self._replace_enum_lines(m.group('enum_lines'),
                                              m.group('enum_name')),
                     m.group('end')])
      return ret
    # DOTALL lets enum_lines span multiple lines; the non-greedy body stops at
    # the first closing brace of the enum.
    file_str = re.sub(
        (r'(?P<pre>\s+enum\s+)(?P<enum_name>[a-zA-Z_][a-zA-Z_0-9]*)'
         r'(?P<start>\s*{\s*)(?P<enum_lines>.*?)(?P<end>\s*})'),
        repl, file_str, flags=re.DOTALL)
    return file_str
class PostProcessDllImport(SWIGPostProcessingInterface):
  """Rewrites the library name inside DllImport attributes in C# files."""

  def __init__(self, shared_lib_name):
    """Initialize the instance.

    Args:
      shared_lib_name: Name of the shared library to load.
    """
    self.shared_lib_name = shared_lib_name
    self.replace_regexp = re.compile(
        r'(\[global::.*\.DllImport\()("[^"]*")(,[^\]]*)')

  def __call__(self, file_str, filename, iteration):
    """Point every DllImport statement at shared_lib_name.

    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.

    Returns:
      Returns the modified file_str.
    """
    replacement = r'\g<1>"{}"\g<3>'.format(self.shared_lib_name)
    return self.replace_regexp.sub(replacement, file_str)
class NamespaceCMethods(SWIGPostProcessingInterface):
  """Adds a module namespace prefix to all generated C methods."""

  def __init__(self, module_name):
    """Initialize the instance.

    Args:
      module_name: Name of the module to use as a prefix for C methods.
    """
    self.module_name = module_name
    self.replace_regexp = re.compile(r'([ "])(CSharp_)([^"(]*)')

  def __call__(self, file_str, filename, iteration):
    """Prefix every generated C method name with the module name.

    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.

    Returns:
      Returns the modified file_str.
    """
    prefixed = r'\g<1>{}_\g<2>\g<3>'.format(self.module_name)
    return self.replace_regexp.sub(prefixed, file_str)
class ReplaceExceptionChecks(SWIGPostProcessingInterface):
  """Redirects module-local pending-exception checks to a global module."""

  def __init__(self, module_name, ignore_paths):
    """Initialize the instance.

    Args:
      module_name: Name of the module to redirect exceptions to.
      ignore_paths: List of path prefixes to ignore when doing the replacement.
    """
    self.module_name = module_name
    self.ignore_paths = ignore_paths
    self.replace_regexp = re.compile(
        r'[A-Za-z]+(PINVOKE\.SWIGPendingException\.Pending.*throw *)'
        r'[A-Za-z]+(PINVOKE\.SWIGPendingException\.Retrieve)')

  def __call__(self, file_str, filename, iteration):
    """Redirect module local exception checks to a global module.

    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Only performs the replacement if this file isn't in the PINVOKE
        module or one of the ignored paths.
      iteration: Unused.

    Returns:
      Returns the modified file_str.
    """
    if filename.endswith('PINVOKE.cs'):
      return file_str
    if any(path in filename for path in self.ignore_paths):
      return file_str
    replacement = r'{module_name}\g<1>{module_name}\g<2>'.format(
        module_name=self.module_name)
    return self.replace_regexp.sub(replacement, file_str)
class FixSealedClasses(SWIGPostProcessingInterface):
  """Fixes sealed class members.
  * Removes the virtual modifier on methods in sealed classes.
  * Changes protected members to private.
  """
  def __init__(self):
    """Initialize the instance."""
    self.virtual_regexp = re.compile(r'( +)virtual +')
    self.protected_regexp = re.compile(r'protected ([^ ]+ [^ ]+ *;)')
    self.sealed_class_regexp = re.compile(r'sealed .*class ([^ ]+).*{')
  def __call__(self, file_str, filename, iteration):
    """Replace the virtual modifier on methods in sealed classes.
    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.
    Returns:
      Returns the modified file_str.
    """
    output = []
    # Stack of (scope_depth, class_name) tuples for sealed classes currently
    # open at this point in the file.
    class_stack = []
    scope_count = 0
    for line_number, line in enumerate(file_str.splitlines()):
      line_number += 1  # Convert from zero based series to 1 based.
      if class_stack:
        # If we're on a line inside a class rather than a method.
        if class_stack[0][0] == scope_count:
          logging.debug('%d: %s(%d), %s',
                        line_number, class_stack[0][1], scope_count, line)
          line = self.virtual_regexp.sub(r'\g<1>', line)
          line = self.protected_regexp.sub(r'private \g<1>', line)
        elif scope_count < class_stack[0][0]:
          logging.debug('%d: %s(%d), %s',
                        line_number, class_stack[0][1], scope_count,
                        'end of class')
          class_stack.pop()
      # Track the number of braces to determine which scope we're in.
      # obviously this is going to break in cases where strings contain
      # braces but we don't generate any code like that at the moment.
      scope_count += line.count('{') - line.count('}')
      class_match = self.sealed_class_regexp.search(line)
      if class_match:
        class_name = class_match.groups()[0]
        logging.debug('%d: %s(%d), %s', line_number, class_name,
                      scope_count, 'found sealed class')
        # Brace matched on the line so increase the scope count.
        class_stack.append((scope_count, class_name))
      output.append(line)
    return '\n'.join(output)
class RenameAsyncMethods(SWIGPostProcessingInterface):
  """Renames all methods that return a Task to *Async()."""

  def __init__(self):
    """Initialize the instance."""
    self.replace_regexps = [
        re.compile(r'( +System\.Threading\.Tasks\.Task<[^>]+> +)'
                   r'([^ ]+)( *\([^)]*\) *{)$'),
        re.compile(r'( +System\.Threading\.Tasks\.Task +)'
                   r'(\w+)( *\([^)]*\) *{)$')]

  def _rename_line(self, line):
    """Return the line with a Task-returning method renamed, if applicable."""
    for pattern in self.replace_regexps:
      match = pattern.search(line)
      if match is None:
        continue
      # Skip methods that are already *Async or come from a Future result
      # class (GetTask).
      method_name = match.group(2)
      if method_name.endswith('Async') or method_name == 'GetTask':
        return line
      return pattern.sub(r'\g<1>\g<2>Async\g<3>', line)
    return line

  def __call__(self, file_str, filename, iteration):
    """Renames all methods that return a Task to *Async().

    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.

    Returns:
      Returns the modified file_str.
    """
    renamed = [self._rename_line(line) for line in file_str.splitlines()]
    return '\n'.join(renamed)
class RenameArgsFromSnakeToCamelCase(SWIGPostProcessingInterface):
  """Renames all public method arguments from snake_case to camelCase."""
  def __init__(self):
    """Initialize the instance."""
    # Captures: (1) visibility+return type, (2) method name + '(',
    # (3) argument list, (4) ')' and opening brace.
    self.function_regexp = re.compile(
        r'(public.*? )([^ (]+ *\()([^)]+)(\).*{)$')
  @staticmethod
  def snake_to_camel_case(identifier):
    """Convert an identifier string from snake_case to camelCase.
    Args:
      identifier: Identifier to convert.
    Returns:
      Modifier identifier string.
    """
    output_words = []
    for index, word in enumerate(identifier.split('_')):
      if index == 0:
        output_words.append(word)
      elif word:
        output_words.append(word[0].upper() + word[1:])
    return ''.join(output_words)
  @staticmethod
  def replace_arguments(replacements, line):
    """Replace arguments in a line.
    Args:
      replacements: List of (regex, replacement) tuples to replace in the line.
      line: Line to modify.
    Returns:
      Modified line.
    """
    for argument_regexp, argument_replacement in replacements:
      line = argument_regexp.sub(argument_replacement, line)
    return line
  def __call__(self, file_str, filename, iteration):
    """Rename all public method arguments from snake_case to camelCase.
    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.
    Returns:
      Returns the modified file_str.
    """
    output = []
    # Stack of (scope_depth, function_name, argument_substitutions) for
    # functions whose bodies are currently open.
    function_stack = []
    scope_count = 0
    for line_number, line in enumerate(file_str.splitlines()):
      line_number += 1 # Convert from zero based series to 1 based.
      # Track the number of braces to determine which scope we're in.
      # obviously this is going to break in cases where strings contain
      # braces but we don't generate any code like that at the moment.
      # This also breaks with single line functions e.g
      # public void SomeFunc(int bish_bash) { state += bish_bash; }
      scope_count += line.count('{') - line.count('}')
      # Search for a function on the line.
      function_match = self.function_regexp.search(line)
      if function_match:
        function_name = function_match.groups()[1]
        argument_substitutions = []
        # Build a set of identifier replacements to apply.
        for type_argument in function_match.groups()[2].split(','):
          type_argument = type_argument.strip()
          logging.debug('%d: %s types and args %s', line_number, function_name,
                        str(type_argument))
          # Split type and argument, handling generic types.
          end_of_type = type_argument.rfind('>')
          end_of_type = (end_of_type if end_of_type > 0 else
                         type_argument.rfind(' '))
          argument = type_argument[end_of_type:].strip()
          camel_case_arg = RenameArgsFromSnakeToCamelCase.snake_to_camel_case(
              argument)
          if argument != camel_case_arg:
            logging.debug('%d: %s arg %s --> %s', line_number, function_name,
                          argument, camel_case_arg)
            regex = r'(\b)' + argument + r'(\b)'
            argument_substitutions.append((
                re.compile(regex), r'\g<1>' + camel_case_arg + r'\g<2>'))
        logging.debug('%d: %s(%d), %s args=%s', line_number, function_name,
                      scope_count, 'found function',
                      str([a[1] for a in argument_substitutions]))
        function_stack.append((scope_count, function_name,
                               argument_substitutions))
        # Update the doc string if there is one.
        # Walks backwards through already-emitted '///' lines and applies the
        # same argument renames to the XML doc comments.
        if output and argument_substitutions:
          line_index = len(output) - 1
          while (line_index >= 0 and
                 output[line_index].lstrip().startswith('///')):
            output[line_index] = (
                RenameArgsFromSnakeToCamelCase.replace_arguments(
                    argument_substitutions, output[line_index]))
            line_index -= 1
      if function_stack:
        if function_stack[0][0] == scope_count:
          line = RenameArgsFromSnakeToCamelCase.replace_arguments(
              function_stack[0][2], line)
          logging.debug('%d: %s(%d), %s', line_number, function_stack[0][1],
                        scope_count, line)
        elif scope_count < function_stack[0][0]:
          logging.debug('%d: %s(%d), %s', line_number, function_stack[0][1],
                        scope_count, 'end of function')
          function_stack.pop()
      output.append(line)
    return '\n'.join(output)
class InternalMethodsToInternalVisibility(SWIGPostProcessingInterface):
  """Changes visibility to internal for "Internal" methods and properties.

  Any method or property whose identifier ends with "Internal" is changed
  from public to internal visibility.
  """

  def __init__(self):
    """Initialize the instance."""
    self.function_property_regexp = re.compile(
        r'(public.*? )([^ )]+Internal)($| +|[({])')

  def __call__(self, file_str, filename, iteration):
    """Change "Internal" methods and properties to internal visibility.

    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Unused.

    Returns:
      Returns the modified file_str.
    """
    rewritten = []
    for line in file_str.splitlines():
      match = self.function_property_regexp.search(line)
      if match is not None:
        # Rewrite only the matched visibility prefix, leaving the rest of the
        # line untouched.
        prefix = match.group(1).replace('public', 'internal')
        line = line[:match.start(1)] + prefix + line[match.end(1):]
      rewritten.append(line)
    return '\n'.join(rewritten)
class DynamicToReinterpretCast(SWIGPostProcessingInterface):
"""Changes the use of dynamic_cast in SWIG generated code to reinterpret_cast.
SWIG uses dynamic_cast even though the type is guaranteed to be
derived from the base class. This only happens when you use the
"directors" feature, and it's safe to replace with reinterpret cast to
avoid needing RTTI.
"""
def __call__(self, file_str, filename, iteration):
"""Change dynamic_cast to reinterpret_cast.
Args:
file_str: This is a string containing the file contents to be processed.
filename: Unused.
iteration: Unused.
Returns:
Returns the modified file_str.
"""
if iteration != 0:
return file_str
return re.sub(r'dynamic_cast<', 'reinterpret_cast<', file_str)
class AddPInvokeAttribute(SWIGPostProcessingInterface):
  """Adds missing MonoPInvokeCallback attributes.

  Unity requires certain CSharp to C++ callback methods to be tagged with
  this attribute so that IL2CPP can generate proper C++ code.
  """

  def __init__(self):
    """Initialize the instance."""
    # Ordered (delegate_name, pattern) pairs; the first pattern that matches
    # a line determines which delegate type is used in the attribute.
    self.search_strings = [
        ('ExceptionArgumentDelegate',
         re.compile('static void SetPendingArgument.*string paramName\\)')),
        ('FirestoreExceptionDelegate',
         re.compile('static void SetPendingFirestoreException.*string message')),
        ('ExceptionDelegate',
         re.compile('static void SetPending.*string message')),
        ('SWIGStringDelegate',
         re.compile('static string CreateString')),
    ]

  def _delegate_for_line(self, line):
    """Return the delegate name for the first matching pattern, or None."""
    for delegate, regex in self.search_strings:
      if regex.search(line):
        return delegate
    return None

  def __call__(self, file_str, filename, iteration):
    """Insert MonoPInvokeCallback attributes on the first pass only.

    Args:
      file_str: This is a string containing the file contents to be processed.
      filename: Unused.
      iteration: Only the first (0th) pass performs the insertion.

    Returns:
      Returns the modified file_str.
    """
    if iteration != 0:
      return file_str
    annotated = []
    for line in file_str.splitlines():
      delegate = self._delegate_for_line(line)
      if delegate:
        # Mirror the declaration's indentation on the inserted attribute.
        leading_whitespace = re.search(r'^(\s+)', line).group(0)
        annotated.append('%s[MonoPInvokeCallback(typeof(%s))]' %
                         (leading_whitespace, delegate))
      annotated.append(line)
    return '\n'.join(annotated)
def apply_post_process(file_buffer, file_name, processes):
  """Repeatedly applies transformation passes to a file until it stabilizes.

  Each process is a callable implementing SWIGPostProcessingInterface. All
  active processes are run over the buffer in order; a process is retired
  once a full pass makes no change for it.

  Args:
    file_buffer: Contents of the file.
    file_name: Path of the file.
    processes: A list of objects implementing the SWIGPostProcessingInterface.

  Returns:
    Contents of the file with all transformations applied.
  """
  pass_number = 0
  active = list(processes)
  while active:
    # Track which passes still changed the buffer so the next round only
    # re-runs those.
    still_effective = set()
    for transform in active:
      transformed = transform(file_buffer, file_name, pass_number)
      if transformed != file_buffer:
        still_effective.add(transform)
        file_buffer = transformed
    pass_number += 1
    # Preserve list order while dropping passes that had no further effect.
    active = [transform for transform in active if transform in still_effective]
  return file_buffer
def post_process(directory, file_extensions, processes):
  """Runs the given post-processing passes over all matching files in-place.

  Args:
    directory: Directory containing all source files to be processed.
    file_extensions: List of extensions of files to process.
    processes: A list of objects implementing the SWIGPostProcessingInterface.
  """
  # Collect every regular file in the directory with a matching extension.
  entries = (os.path.join(directory, name) for name in os.listdir(directory))
  paths = [path for path in entries
           if not os.path.isdir(path)
           and os.path.splitext(path)[1] in file_extensions]
  # Rewrite each file in place, but only touch files that actually changed.
  for path in paths:
    with open(path, 'r+') as source_file:
      original = source_file.read()
      processed = apply_post_process(original, path, processes)
      if processed != original:
        source_file.seek(0)
        source_file.write(processed)
        source_file.truncate()
def main(unused_argv):
  """Registers the post-processing passes and runs them over the output dirs.

  Args:
    unused_argv: Extra arguments not consumed by the config flags.

  Returns:
    The exit code status; 0 for success, non-zero for an error.
  """
  # Build the C# pass list; enum renaming and PInvoke attributes always run.
  passes = [SWIGEnumPostProcessing(), AddPInvokeAttribute()]
  if FLAGS.dll_import:
    passes.append(PostProcessDllImport(FLAGS.dll_import))
  if FLAGS.module_name_prefix:
    passes.append(NamespaceCMethods(FLAGS.module_name_prefix))
  if FLAGS.exception_module_name:
    passes.append(ReplaceExceptionChecks(FLAGS.exception_module_name,
                                         FLAGS.exception_ignore_path_prefixes))
  if FLAGS.fix_sealed_classes:
    passes.append(FixSealedClasses())
  if FLAGS.internal_visibility:
    passes.append(InternalMethodsToInternalVisibility())
  if FLAGS.rename_async_methods:
    passes.append(RenameAsyncMethods())
  if FLAGS.snake_to_camel_case_args:
    passes.append(RenameArgsFromSnakeToCamelCase())
  post_process(FLAGS.csharp_dir, ['.cs'], passes)
  if FLAGS.cpp_dir:
    # C++ output only needs the cast fixup and optional method namespacing.
    cpp_passes = [DynamicToReinterpretCast()]
    if FLAGS.module_name_prefix:
      cpp_passes.append(NamespaceCMethods(FLAGS.module_name_prefix))
    post_process(FLAGS.cpp_dir, ['.h', '.cc', '.cpp'], cpp_passes)
  return 0
if __name__ == '__main__':
  # csharp_dir is the only mandatory flag; everything else has defaults.
  flags.mark_flag_as_required('csharp_dir')
  app.run(main)
| 1.445313 | 1 |
pytorch_translate/research/word_prediction/word_prediction_model.py | liezl200/translate-1 | 1 | 12759518 | from fairseq.models import (
register_model,
register_model_architecture,
FairseqModel,
)
from pytorch_translate import vocab_reduction
from pytorch_translate.rnn import (
torch_find,
LSTMSequenceEncoder,
RNNEncoder,
RNNDecoder,
)
from .word_predictor import WordPredictor
class FairseqWordPredictionModel(FairseqModel):
    """Encoder-decoder translation model extended with a word predictor.

    The predictor consumes the encoder output and scores target-vocabulary
    words; its output is returned alongside the decoder output.
    """

    def __init__(self, encoder, decoder, predictor):
        super().__init__(encoder, decoder)
        self.predictor = predictor

    def forward(self, src_tokens, src_lengths, prev_output_tokens):
        """Run the encoder, the word predictor, and the decoder in turn."""
        encoder_out = self.encoder(src_tokens, src_lengths)
        predictor_out = self.predictor(encoder_out)
        decoder_out = self.decoder(prev_output_tokens, encoder_out)
        return predictor_out, decoder_out

    def get_predictor_normalized_probs(self, pred_output, log_probs):
        """Normalize the word-predictor scores (optionally in log space)."""
        return self.predictor.get_normalized_probs(pred_output, log_probs)

    def get_target_words(self, sample):
        """Return the target token tensor from a training sample."""
        return sample['target']
@register_model('rnn_wp')
class RNNWordPredictionModel(FairseqWordPredictionModel):
    """RNN encoder-decoder with a word predictor, registered as 'rnn_wp'."""

    @staticmethod
    def add_args(parser):
        """Register this model's command-line hyperparameter arguments."""
        parser.add_argument(
            '--dropout',
            default=0.1,
            type=float,
            metavar='D',
            help='dropout probability',
        )
        parser.add_argument(
            '--encoder-embed-dim',
            type=int,
            metavar='N',
            help='encoder embedding dimension',
        )
        parser.add_argument(
            '--encoder-freeze-embed',
            default=False,
            action='store_true',
            help=('whether to freeze the encoder embedding or allow it to be '
                  'updated during training'),
        )
        parser.add_argument(
            '--encoder-hidden-dim',
            type=int,
            metavar='N',
            help='encoder cell num units',
        )
        parser.add_argument(
            '--encoder-layers',
            type=int,
            metavar='N',
            help='number of encoder layers',
        )
        parser.add_argument(
            '--encoder-bidirectional',
            action='store_true',
            help='whether the first layer is bidirectional or not',
        )
        parser.add_argument(
            '--averaging-encoder',
            default=False,
            action='store_true',
            help=(
                'whether use mean encoder hidden states as decoder initial '
                'states or not'
            ),
        )
        parser.add_argument(
            '--decoder-embed-dim',
            type=int,
            metavar='N',
            help='decoder embedding dimension',
        )
        # NOTE(review): help text says "encoder embedding" but this flag
        # controls the decoder embedding (copy-paste from above).
        parser.add_argument(
            '--decoder-freeze-embed',
            default=False,
            action='store_true',
            help=('whether to freeze the encoder embedding or allow it to be '
                  'updated during training'),
        )
        parser.add_argument(
            '--decoder-hidden-dim',
            type=int,
            metavar='N',
            help='decoder cell num units',
        )
        parser.add_argument(
            '--decoder-layers',
            type=int,
            metavar='N',
            help='number of decoder layers',
        )
        parser.add_argument(
            '--decoder-out-embed-dim',
            type=int,
            metavar='N',
            help='decoder output embedding dimension',
        )
        parser.add_argument(
            '--attention-type',
            type=str,
            metavar='EXPR',
            help='decoder attention, defaults to dot',
        )
        parser.add_argument(
            '--residual-level',
            default=None,
            type=int,
            help=(
                'First layer where to apply a residual connection. '
                'The value should be greater than 0 and smaller than the number of '
                'layers.'
            ),
        )
        parser.add_argument(
            '--cell-type',
            default='lstm',
            type=str,
            metavar='EXPR',
            help='cell type, defaults to lstm, values:lstm, milstm, layer_norm_lstm',
        )
        # Granular dropout settings (if not specified these default to --dropout)
        parser.add_argument(
            '--encoder-dropout-in',
            type=float,
            metavar='D',
            help='dropout probability for encoder input embedding',
        )
        parser.add_argument(
            '--encoder-dropout-out',
            type=float,
            metavar='D',
            help='dropout probability for encoder output',
        )
        parser.add_argument(
            '--decoder-dropout-in',
            type=float,
            metavar='D',
            help='dropout probability for decoder input embedding',
        )
        parser.add_argument(
            '--decoder-dropout-out',
            type=float,
            metavar='D',
            help='dropout probability for decoder output',
        )
        parser.add_argument(
            '--sequence-lstm',
            action='store_true',
            help='use nn.LSTM implementation for encoder',
        )
        # new arg
        parser.add_argument(
            '--predictor-hidden-dim',
            type=int,
            metavar='N',
            help='word predictor num units',
        )
        # Args for vocab reduction
        vocab_reduction.add_args(parser)

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        src_dict, dst_dict = task.source_dictionary, task.target_dictionary
        # Fill in defaults for any hyperparameters the user left unset.
        base_architecture_wp(args)

        if args.sequence_lstm:
            encoder_class = LSTMSequenceEncoder
        else:
            encoder_class = RNNEncoder

        encoder = encoder_class(
            src_dict,
            embed_dim=args.encoder_embed_dim,
            freeze_embed=args.encoder_freeze_embed,
            cell_type=args.cell_type,
            num_layers=args.encoder_layers,
            hidden_dim=args.encoder_hidden_dim,
            dropout_in=args.encoder_dropout_in,
            dropout_out=args.encoder_dropout_out,
            residual_level=args.residual_level,
            bidirectional=bool(args.encoder_bidirectional),
        )
        decoder = RNNDecoder(
            src_dict=src_dict,
            dst_dict=dst_dict,
            vocab_reduction_params=args.vocab_reduction_params,
            encoder_hidden_dim=args.encoder_hidden_dim,
            embed_dim=args.decoder_embed_dim,
            freeze_embed=args.decoder_freeze_embed,
            out_embed_dim=args.decoder_out_embed_dim,
            cell_type=args.cell_type,
            num_layers=args.decoder_layers,
            hidden_dim=args.decoder_hidden_dim,
            attention_type=args.attention_type,
            dropout_in=args.decoder_dropout_in,
            dropout_out=args.decoder_dropout_out,
            residual_level=args.residual_level,
            averaging_encoder=args.averaging_encoder,
        )
        # Predictor scores the full target vocabulary from the encoder output.
        predictor = WordPredictor(
            args.encoder_hidden_dim, args.predictor_hidden_dim, len(dst_dict)
        )
        return cls(encoder, decoder, predictor)

    def get_targets(self, sample, net_output):
        """Return flattened targets, remapped if vocab reduction is active."""
        targets = sample['target'].view(-1)
        possible_translation_tokens = net_output[-1]
        if possible_translation_tokens is not None:
            # Map full-vocabulary target ids to indices into the reduced
            # vocabulary used by the decoder output.
            # NOTE(review): relies on self.dst_dict, which is not assigned in
            # this class or its visible base — confirm the base class sets it.
            targets = torch_find(
                possible_translation_tokens.data,
                targets.data,
                len(self.dst_dict),
            )
        return targets
@register_model_architecture('rnn_wp', 'rnn_wp')
def base_architecture_wp(args):
    """Fill in default hyperparameters for any options left unset on args."""
    # (attribute name, fallback value); dropout-derived fallbacks read the
    # already-parsed --dropout value.
    fallbacks = [
        ('encoder_embed_dim', 512),
        ('encoder_layers', 1),
        ('encoder_hidden_dim', 512),
        ('encoder_bidirectional', False),
        ('encoder_dropout_in', args.dropout),
        ('encoder_dropout_out', args.dropout),
        ('decoder_embed_dim', 512),
        ('decoder_layers', 1),
        ('decoder_hidden_dim', 512),
        ('decoder_out_embed_dim', 512),
        ('attention_type', 'dot'),
        ('decoder_dropout_in', args.dropout),
        ('decoder_dropout_out', args.dropout),
        ('averaging_encoder', False),
        ('encoder_freeze_embed', False),
        ('decoder_freeze_embed', False),
        ('cell_type', 'lstm'),
    ]
    for name, fallback in fallbacks:
        setattr(args, name, getattr(args, name, fallback))
    vocab_reduction.set_arg_defaults(args)
    args.sequence_lstm = getattr(args, 'sequence_lstm', False)
    args.predictor_hidden_dim = getattr(args, 'predictor_hidden_dim', 512)
| 2.28125 | 2 |
SemanticParsing/parsing_vkapi.py | yeltayzhastay/Semantic-Analisis | 0 | 12759519 | <filename>SemanticParsing/parsing_vkapi.py
import Vk_parser as parser
import time
def main():
    """Search VK for groups matching 'қылмыс' (crime) and export their
    sentiment data to a CSV file.
    """
    start_time = time.time()
    # NOTE(review): access token is hard-coded (redacted in the public repo);
    # consider loading it from the environment instead.
    access_token = "<KEY>"
    searcher = parser.Vk_parser(access_token)
    ids = searcher.SearchGroup('қылмыс')
    # Fixed an unused local: a second, never-used token string was removed.
    vk_parse = parser.Vk_parser(access_token)
    vk_parse.Get_sentimental(ids, 10)
    vk_parse.to_csv('dataset20_01_21kaz.csv')
    print('Parsing data finished!', round(time.time() - start_time, 2), 'sec')
if __name__ == "__main__":
main() | 2.6875 | 3 |
tests/benchmarks/cli/commands/test_help.py | iterative/dvc-benchmark | 0 | 12759520 | def test_help(bench_dvc):
bench_dvc("--help", rounds=100)
| 0.941406 | 1 |
tonyc_utils/prompts.py | tonycpsu/tonyc_utils | 0 | 12759521 | import readline
readline.parse_and_bind("tab: menu-complete")
import getch
def rlinput(prompt, prefill=''):
    """Prompt for a line of input with *prefill* already typed in the buffer.

    Uses a readline startup hook to inject the prefill text; the hook is
    always cleared afterwards so later input() calls are unaffected.
    """
    readline.set_startup_hook(lambda: readline.insert_text(prefill))
    try:
        return input(prompt)
    finally:
        readline.set_startup_hook()
class Completer:
    """Readline completer that matches candidates from a fixed word list.

    The matches for the current prefix are cached so that readline's
    repeated calls with increasing indices stay cheap.
    """

    def __init__(self, words):
        self.words = words
        self.prefix = None

    def complete(self, prefix, index):
        """Return the index-th word starting with *prefix*, or None."""
        if prefix != self.prefix:
            # New prefix: rebuild and cache the candidate list.
            self.prefix = prefix
            self.matching_words = [
                word for word in self.words if word.startswith(prefix)
            ]
        try:
            return self.matching_words[index]
        except IndexError:
            return None
def query_choice(choices,
                 prompt="Choice: ",
                 default=None, single_char=False,
                 allow_other=False,
                 case_insensitive=False):
    """Prompt the user until they enter one of *choices*.

    Args:
        choices: Iterable of acceptable answer strings.
        prompt: Text shown to the user.
        default: Pre-filled answer for line input (ignored in single-char mode).
        single_char: Read a single keypress instead of a full line.
        allow_other: Accept any input, not just members of *choices*.
        case_insensitive: Compare the answer to *choices* ignoring case.

    Returns:
        The string the user entered.
    """
    while True:
        if single_char:
            print(prompt, end=' ')
            choice = getch.getche()
            print()
        else:
            completer = Completer(choices)
            readline.set_completer(completer.complete)
            choice = rlinput(prompt=prompt, prefill=default or None)
            readline.set_completer(None)
        if not allow_other:
            # BUG FIX: the two membership checks were swapped —
            # case_insensitive=True previously performed an exact match and
            # case_insensitive=False performed a lowercased match.
            if case_insensitive:
                accepted = choice.lower() in [c.lower() for c in choices]
            else:
                accepted = choice in choices
            if not accepted:
                print("%s not one of: %s" %(choice, ','.join(choices)))
                continue
        return choice
def query_yes_no(prompt=">>> ", default="y", single_char=False):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes":True, "y":True, "ye":True,
"no":False, "n":False}
if default == None:
choices = " [y/n] "
elif default in ["yes", "y"]:
choices = " [Y/n] "
elif default in ["no", "n"]:
choices = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
choice = query_choice("yn", prompt=prompt + choices,
default=default,
single_char=single_char, case_insensitive=True)
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
readline.parse_and_bind("tab: menu-complete")
# print query_yes_no(single_char=True)
# print query_choice(["foo", "bar", "baz"])
# print query_choice(["foo", "bar", "baz"], default="foo", allow_other=True)
| 3.640625 | 4 |
src/kaczmarz/_normalize.py | yotamyaniv/kaczmarz-algorithms | 3 | 12759522 | <filename>src/kaczmarz/_normalize.py<gh_stars>1-10
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spla
def compute_row_norms(A):
    """Compute the Euclidean norm of each row of a matrix.

    Parameters
    ----------
    A : (m, n) spmatrx or array_like

    Returns
    -------
    row_norms : (m,) array
    """
    # Sparse matrices need the sparse-aware norm; dense inputs use numpy's.
    norm_fn = spla.norm if sp.issparse(A) else np.linalg.norm
    return norm_fn(A, axis=1)
def normalize_matrix(A, row_norms):
    """Normalize a matrix to have rows with norm 1.

    Parameters
    ----------
    A : (m, n) spmatrx or array
    row_norms : (m,) array

    Returns
    -------
    A_normalized : (m, n) spmatrx or array
    """
    # Left-multiplying by a sparse diagonal matrix keeps sparse inputs sparse.
    # Dividing a sparse matrix by ``row_norms[:, None]`` instead would yield
    # an np.matrix rather than a sparse matrix.
    inverse_norms = 1 / row_norms
    return sp.diags(inverse_norms) @ A
def normalize_system(A, b):
    """Scale the system ``A @ x = b`` so that the rows of ``A`` have norm 1.

    Parameters
    ----------
    A : (m, n) spmatrix or array_like
    b : (m,) or (m, 1) array_like

    Returns
    -------
    A_normalized : (m, n) array or spmatrx
        Copy of ``A`` with rows scaled to have norm ``1``.
    b_normalized : (m,) array
        Copy of ``b``, flattened, with entries divided by the row norms of
        ``A``.
    row_norms : (m,) array
        The original row norms of ``A``.
    """
    # Materialize array_like input so the norms and scaling work uniformly.
    if not sp.issparse(A):
        A = np.array(A)
    row_norms = compute_row_norms(A)
    normalized_A = normalize_matrix(A, row_norms=row_norms)
    normalized_b = np.array(b).ravel() / row_norms
    return normalized_A, normalized_b, row_norms
| 3.296875 | 3 |
network/utils/lmdb_to_tfrecords.py | andrey-dmitrov/SketchCNN | 82 | 12759523 | #
# Project SketchCNN
#
# Author: <NAME> (<EMAIL>),
# Copyright (c) 2018. All Rights Reserved.
#
# ==============================================================================
"""Convert LMDB to TFRecords
"""
import lmdb
import tensorflow as tf
import os
# Output TFRecords file and the directory holding the source LMDB shards.
# NOTE(review): these are placeholder Windows paths; edit per machine.
tfrecord_fn = r'path_to_tfrecord\train_db.tfrecords'
data_dir = r'path_to_lmdb'
def __bytes_feature(value):
    """Wrap a raw byte string in a TF ``Feature`` for Example serialization."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def lmdb_to_TFRecords():
    """Merge every configured LMDB into a single TFRecords training file.

    Each LMDB key/value pair becomes one ``tf.train.Example`` with two byte
    features: ``name`` (the LMDB key) and ``block`` (the raw value).
    """
    writer = tf.python_io.TFRecordWriter(tfrecord_fn)
    # collect all lmdbs to write into one TFRecords (at least one lmdb)
    db_paths = [os.path.join(data_dir, 'lmdb_0'), os.path.join(data_dir, 'lmdb_1'), os.path.join(data_dir, 'lmdb_2')]
    for i in range(3):
        env = lmdb.open(db_paths[i], readonly=True)
        with env.begin() as txn:
            with txn.cursor() as curs:
                for key, value in curs:
                    print('put key: {} to train tfrecord'.format(key.decode('utf-8')))
                    feature = {
                        'name': __bytes_feature(key),
                        'block': __bytes_feature(value)
                    }
                    example = tf.train.Example(features=tf.train.Features(feature=feature))
                    writer.write(example.SerializeToString())
    writer.close()
if __name__ == '__main__':
    # Entry point: convert the configured LMDBs into one TFRecords file.
    # Set GPU (could remove this setting when running on machine without GPU)
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    lmdb_to_TFRecords()
| 2.3125 | 2 |
config/logger.py | Gumbew/mr-client | 0 | 12759524 | <filename>config/logger.py
import logging
import sys
from logging.handlers import RotatingFileHandler
from pathlib import Path
# Logger
MAX_LOG_SIZE = 1 * 10 ** 6 # 1 MB: rotation threshold for each log file
LOG_FILE_NAME = Path('logs', 'client.log')
# Import-time side effect: make sure the logs/ directory exists.
LOG_FILE_NAME.parent.mkdir(parents=True, exist_ok=True)
class BaseLogger:
    """Factory for loggers that write to stdout and a rotating log file."""

    def __init__(self, log_file_name):
        # Path of the log file every logger produced by this factory uses.
        self.log_file_name = log_file_name

    @staticmethod
    def get_console_handler(formatter):
        """Return a DEBUG-level handler writing to stdout."""
        console_handler = logging.StreamHandler(sys.stdout)
        console_handler.setFormatter(formatter)
        console_handler.setLevel(logging.DEBUG)
        return console_handler

    def get_file_handler(self, formatter):
        """Return a DEBUG-level rotating file handler (5 backups kept)."""
        file_handler = RotatingFileHandler(self.log_file_name, maxBytes=MAX_LOG_SIZE, backupCount=5)
        file_handler.setFormatter(formatter)
        file_handler.setLevel(logging.DEBUG)
        return file_handler

    def get_logger(self, logger_name):
        """Return an INFO-level logger with console and file handlers.

        Fix: the original added handlers unconditionally, so calling this
        twice for the same ``logger_name`` duplicated every log line
        (``logging.getLogger`` returns the same singleton per name).
        Handlers are now attached only on first configuration.
        """
        formatter = logging.Formatter(fmt='[%(levelname)s] %(asctime)s %(name)s.%(funcName)s: %(message)s',
                                      datefmt='%Y-%m-%d %H:%M:%S')
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.INFO)
        if not logger.handlers:
            logger.addHandler(self.get_console_handler(formatter))
            logger.addHandler(self.get_file_handler(formatter))
        return logger
client_logger = BaseLogger(LOG_FILE_NAME)
| 2.65625 | 3 |
click_man/__main__.py | berthin/click-man | 0 | 12759525 | <reponame>berthin/click-man<gh_stars>0
"""
click-man - Generate man pages for click application
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a click CLI command to
generate man pages from a click application.
:copyright: (c) 2016 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import os
import click
from pkg_resources import iter_entry_points, get_distribution
from click_man.core import write_man_pages
@click.command(context_settings={'help_option_names': ['-h', '--help']})
@click.option('--target', '-t', default=os.path.join(os.getcwd(), 'man'),
              type=click.Path(file_okay=False, dir_okay=True, resolve_path=True),
              help='Target location for the man pages')
@click.version_option(get_distribution('click-man').version, '-V', '--version')
@click.argument('name')
def cli(target, name):
    """
    Generate man pages for the scripts defined in the ``console_scripts`` entry point.

    The cli application is gathered from entry points of installed packages.

    The generated man pages are written to files in the directory given
    by ``--target``.
    """
    # (Fix: the docstring said "console_acripts".)
    console_scripts = [ep for ep in iter_entry_points('console_scripts', name=name)]
    if not console_scripts:
        raise click.ClickException('"{0}" is not an installed console script.'.format(name))
    # Only generate man pages for first console script
    entry_point = console_scripts[0]

    # create target directory if it does not exist yet; an already-existing
    # directory raises OSError, which is deliberately ignored
    try:
        os.makedirs(target)
    except OSError:
        pass

    click.echo('Load entry point {0}'.format(name))
    cli = entry_point.resolve()

    # If the entry point isn't a click.Command object, try to find it in the module
    if not isinstance(cli, click.Command):
        from importlib import import_module
        from inspect import getmembers
        if not entry_point.module_name:
            raise click.ClickException('Could not find module name for "{0}".'.format(name))
        ep_module = import_module(entry_point.module_name)
        # Pick the first click.Command defined in the entry point's module.
        ep_members = getmembers(ep_module, lambda x: isinstance(x, click.Command))
        if not ep_members:
            raise click.ClickException('Could not find click.Command object for "{0}".'.format(name))
        (ep_name, cli) = ep_members[0]
        click.echo('Found alternate entry point {0} in {1}'.format(ep_name, name))

    click.echo('Generate man pages for {0} in {1}'.format(name, target))
    write_man_pages(name, cli, version=entry_point.dist.version, target_dir=target)
if __name__ == '__main__':
    # Allow running the module directly (``python -m click_man``).
    cli()
| 2.453125 | 2 |
ORA_HandsOnMachineLearning/Chapter02/housing_data.py | ptracton/MachineLearning | 0 | 12759526 | <gh_stars>0
#! /usr/bin/env python3
import Housing
import os
import pandas as pd
# Default location of the handson-ml housing dataset, relative to the CWD.
HOUSING_CSV_PATH = os.getcwd()+"/../handson-ml/datasets/housing/"
HOUSING_CSV_FILE = "housing.csv"
def load_housing_data(housing_path=None):
    """Load the housing CSV file into a pandas DataFrame.

    Parameters
    ----------
    housing_path : str, optional
        Directory containing ``housing.csv``.  Defaults to
        ``HOUSING_CSV_PATH``.  (Fix: the original passed ``None`` straight
        into ``os.path.join``, so calling without an argument raised
        ``TypeError``.)

    Returns
    -------
    pandas.DataFrame
    """
    if housing_path is None:
        housing_path = HOUSING_CSV_PATH
    csv_path = os.path.join(housing_path, HOUSING_CSV_FILE)
    return pd.read_csv(csv_path)
#housingData = load_housing_data(HOUSING_CSV_PATH)
#print(housingData.head())
#print(housingData.info())
#print(housingData.describe())
#housingData.hist(bins=50, figsize=(20,15))

# Exercise the Housing helper class end-to-end: load, add an id column,
# inspect the frame, then try both train/test split strategies.
housing = Housing.Housing(path="../handson-ml/datasets/housing/")
housing.load_housing_data()
housing.add_id_column()
print(housing.dataFrame.head())
print(housing.dataFrame.info())
print(housing.dataFrame.describe())

# Random 80/20 split.
housing.split_train_test(0.2)
print(len(housing.trainingData))
print(len(housing.testingData))

# Deterministic split keyed on the "index" id column.
housing.split_train_test_by_id(0.2, "index")
print(housing.trainSet)
| 2.953125 | 3 |
corruptions/gaussian_noise_model_torch.py | m43/PointNav-VO | 0 | 12759527 | <gh_stars>0
#!/usr/bin/env python3
import attr
import torch
from habitat_sim.registry import registry
from habitat_sim.sensor import SensorType
from habitat_sim.sensors.noise_models.sensor_noise_model import SensorNoiseModel
@registry.register_noise_model
@attr.s(auto_attribs=True, kw_only=True, slots=True)
class GaussianNoiseModelTorch(SensorNoiseModel):
    """Additive Gaussian noise for colour sensor observations, in torch."""

    # Scale applied to the sampled noise before adding it to the image.
    intensity_constant: float = 0.2
    # Mean / std-dev of the underlying normal distribution.
    mean: int = 0
    sigma: int = 1

    @staticmethod
    def is_valid_sensor_type(sensor_type: SensorType) -> bool:
        """Only colour (RGB) sensors are supported by this noise model."""
        return sensor_type == SensorType.COLOR

    def simulate(self, image: torch.tensor) -> torch.tensor:
        """Return ``image`` with additive Gaussian noise, clamped to [0, 255].

        NOTE(review): assumes ``image`` is a 3-D tensor (first three shape
        dims are used for the noise) with values in [0, 255] — confirm
        against the calling sensor pipeline.
        """
        noise = (torch.randn(image.shape[0], image.shape[1],
                             image.shape[2], device=image.device) * self.sigma + self.mean) * self.intensity_constant
        # Work in [0, 1], clamp, then rescale back to [0, 255].
        return (torch.maximum(torch.minimum(image / 255.0 + noise, torch.tensor(1.0, device=image.device)),
                              torch.tensor(0.0, device=image.device)) * 255.0)

    def apply(self, image: torch.tensor) -> torch.tensor:
        r"""Alias of `simulate()` to conform to base-class and expected API"""
        return self.simulate(image)
| 2.15625 | 2 |
src/extensions/archive/archive_handler.py | SafEight/durkabot | 3 | 12759528 | import aiohttp
from urllib.parse import urljoin
from extensions.archive.archive_result import ArchiveResult
class ArchiveHandler:
    """Submits URLs to web.archive.org and parses the snapshot response."""

    # User agent required by archive.org's save endpoint.
    archive_org_user_agent = "Durkabot (https://github.com/Durkastan/durkabot)"
    domain = "http://web.archive.org"
    save_url = urljoin(domain, 'save/')

    def __init__(self, loop):
        # One shared HTTP session bound to the bot's event loop.
        self.session = aiohttp.ClientSession(loop=loop)

    async def _fetch(self, link, headers):
        """GET ``link`` and return the response with its body consumed."""
        async with self.session.get(link, headers=headers) as response:
            await response.read()  # no awaitable method for headers :/
            return response

    async def archive(self, link) -> ArchiveResult:
        """Ask the Wayback Machine to save ``link``; return the result."""
        request_url = self.save_url + link
        response = await self._fetch(request_url, {'User-Agent': self.archive_org_user_agent})

        # Error handling: archive.org reports save failures via this header.
        if response.status in [403, 502]:
            raise Exception(response.headers['X-Archive-Wayback-Runtime-Error'])

        archive_result = self.process_result(response.headers)
        return archive_result

    @classmethod
    def process_result(cls, headers):
        """Build an ArchiveResult from the save endpoint's response headers."""
        archive_id = headers['Content-Location']
        link = urljoin(cls.domain, archive_id)

        # Determine if page is cached (archive.org served an existing snapshot)
        cache_hit = headers.get('X-Page-Cache') == 'HIT'

        archive_date = headers['X-Archive-Orig-Date']

        return ArchiveResult(link, archive_date, cache_hit)
| 2.59375 | 3 |
var/spack/repos/builtin/packages/py-astropy/package.py | nkianggiss/spack | 3 | 12759529 | <filename>var/spack/repos/builtin/packages/py-astropy/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAstropy(PythonPackage):
    """The Astropy Project is a community effort to develop a single core
    package for Astronomy in Python and foster interoperability between
    Python astronomy packages."""

    homepage = 'http://www.astropy.org/'
    url = 'https://pypi.io/packages/source/a/astropy/astropy-1.1.2.tar.gz'

    version('1.1.2', 'cbe32023b5b1177d1e2498a0d00cda51')
    version('1.1.post1', 'b52919f657a37d45cc45f5cb0f58c44d')

    # Required dependencies
    depends_on('py-setuptools', type='build')
    depends_on('py-numpy', type=('build', 'run'))

    # Optional dependencies
    depends_on('py-h5py', type=('build', 'run'))
    depends_on('py-beautifulsoup4', type=('build', 'run'))
    depends_on('py-pyyaml', type=('build', 'run'))
    depends_on('py-scipy', type=('build', 'run'))
    depends_on('libxml2')
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-pytz', type=('build', 'run'))
    depends_on('py-scikit-image', type=('build', 'run'))
    depends_on('py-pandas', type=('build', 'run'))
    depends_on('py-markupsafe', type=('build', 'run'))

    # System dependencies
    depends_on('cfitsio')
    depends_on('expat')

    def build_args(self, spec, prefix):
        # Link against the Spack-provided cfitsio/expat instead of the
        # copies bundled with the astropy source tree.
        return ['--use-system-cfitsio', '--use-system-expat']
| 1.632813 | 2 |
TWLight/resources/management/commands/resources_example_data.py | jajodiaraghav/TWLight | 1 | 12759530 | import copy
from django_countries import countries
from faker import Faker
import random
import string
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
from TWLight.resources.factories import PartnerFactory, StreamFactory, VideoFactory, SuggestionFactory
from TWLight.resources.models import Language, Partner, Stream, Suggestion, AccessCode
class Command(BaseCommand):
    help = "Adds a number of example resources, streams, suggestions, and tags."

    def add_arguments(self, parser):
        # Positional argument: how many example partners to create.
        parser.add_argument('num', nargs='+', type=int)

    def handle(self, *args, **options):
        """Populate the database with randomized example partner data.

        Creates ``num`` partners, then post-processes random subsets so the
        example data exercises every partner configuration the UI supports
        (streams, waitlists, access codes, excerpt limits, ...).
        """
        num_partners = options['num'][0]

        tag_list = ["science", "humanities", "social science", "history",
                    "law", "video", "multidisciplinary"]

        fake = Faker()

        coordinators = User.objects.filter(groups__name='coordinators')

        for _ in range(num_partners):
            partner = PartnerFactory(
                company_location = random.choice(list(countries)),
                renewals_available = random.choice([True, False]),
                short_description = fake.paragraph(nb_sentences=4),
                send_instructions = fake.paragraph(nb_sentences=2),
                coordinator = random.choice(coordinators),
                real_name = self.chance(True, False, 40),
                country_of_residence = self.chance(True, False, 20),
                specific_title = self.chance(True, False, 10),
                specific_stream = self.chance(True, False, 10),
                occupation = self.chance(True, False, 10),
                affiliation = self.chance(True, False, 10),
                agreement_with_terms_of_use = self.chance(True, False, 10),
                mutually_exclusive = False
            )
            # ManyToMany relationships can't be set until the partner object
            # has been created.
            random_languages = random.sample(Language.objects.all(),
                                             random.randint(1, 2)
                                             )
            for lang in random_languages:
                partner.languages.add(lang)
            partner.save()

        all_partners = Partner.even_not_available.all()
        for partner in all_partners:
            for tag in random.sample(tag_list, random.randint(1, 4)):
                partner.tags.add(tag)

        # Set 5 partners to need a registration URL. We do this separately
        # because it requires both the account_email and registration_url
        # fields to be set concurrently.
        for registration_partner in random.sample(all_partners, 5):
            registration_partner.account_email = True
            registration_partner.registration_url = fake.uri()
            registration_partner.save()

        # While most fields can be set at random, we want to make sure we
        # get partners with certain fields set to particular values.

        # Set 5 random partners to be unavailable
        for unavailable_partner in random.sample(all_partners, 5):
            unavailable_partner.status = Partner.NOT_AVAILABLE
            unavailable_partner.save()

        # Set 10 random partners to have excerpt limit in words
        for words in random.sample(all_partners, 10):
            words.excerpt_limit = random.randint(100, 250)
            words.save()

        # Set 10 random partners to have excerpt limit as a percentage
        for percentage in random.sample(all_partners, 10):
            percentage.excerpt_limit_percentage = random.randint(5, 50)
            percentage.save()

        # Set 1 random partner to have excerpt limits both in words and percentage
        for percentage_words in random.sample(all_partners, 1):
            percentage_words.excerpt_limit_percentage = random.randint(5, 50)
            percentage_words.excerpt_limit = random.randint(100, 250)
            percentage_words.save()

        available_partners = all_partners.exclude(status=Partner.NOT_AVAILABLE)

        # Set 10 random available partners to be waitlisted
        for waitlisted_partner in random.sample(available_partners, 10):
            waitlisted_partner.status = Partner.WAITLIST
            waitlisted_partner.save()

        # Set 25 random partners to have a long description
        for long_description in random.sample(all_partners, 25):
            long_description.description = fake.paragraph(nb_sentences=10)
            long_description.save()

        # Set 10 random available partners to be featured
        for featured_partner in random.sample(available_partners, 10):
            featured_partner.featured = True
            featured_partner.save()

        # Give any specific_stream flagged partners streams.
        stream_partners = all_partners.filter(specific_stream=True)

        # Random number of accounts available for all partners without streams
        for accounts in all_partners:
            if not accounts.specific_stream:
                accounts.accounts_available = random.randint(10, 550)
                accounts.save()

        # If we happened to not create any partners with streams,
        # create one deliberately.
        if stream_partners.count() == 0:
            stream_partners = random.sample(all_partners, 1)
            stream_partners[0].specific_stream = True
            stream_partners[0].save()

        for partner in stream_partners:
            for _ in range(3):
                stream = StreamFactory(
                    partner= partner,
                    name= fake.sentence(nb_words= 3)[:-1], # [:-1] removes full stop
                    description= fake.paragraph(nb_sentences=2)
                )

        # Set 15 partners to have somewhere between 1 and 5 video tutorial URLs
        for partner in random.sample(all_partners, 15):
            for _ in range(random.randint(1, 5)):
                VideoFactory(
                    partner = partner,
                    tutorial_video_url = fake.url()
                )

        # Random number of accounts available for all streams
        all_streams = Stream.objects.all()
        for each_stream in all_streams:
            each_stream.accounts_available = random.randint(10, 100)
            each_stream.save()

        # Generate a few suggestions with upvotes
        all_users = User.objects.exclude(is_superuser=True)
        author_user = random.choice(all_users)
        for _ in range(random.randint(3, 10)):
            suggestion = SuggestionFactory(
                description = fake.paragraph(nb_sentences=10),
                author = author_user
            )
            suggestion.save()
            suggestion.upvoted_users.add(author_user)
            random_users = random.sample(all_users, random.randint(1, 10))
            suggestion.upvoted_users.add(*random_users)

        # Set 5 partners to use the access code authorization method,
        # and generate a bunch of codes for each.
        # (Fix: the original repeated this whole block twice verbatim — an
        # accidental copy-paste that converted up to 10 partners and
        # generated twice as many codes as the comments describe.)
        for partner in random.sample(available_partners, 5):
            partner.authorization_method = Partner.CODES
            partner.save()
            for i in range(25):
                new_access_code = AccessCode()
                new_access_code.code = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
                new_access_code.partner = partner
                new_access_code.save()

    def chance(self, selected, default, chance):
        """Return ``selected`` with probability ``chance`` percent.

        Used to generate data that's more in line with the live site
        distribution.
        """
        roll = random.randint(0, 100)
        if roll < chance:
            selection = selected
        else:
            selection = default

        return selection
| 2.0625 | 2 |
plotting/plot_dlogp_selective.py | sjforeman/RadioFisher | 3 | 12759531 | #!/usr/bin/python
"""
Process EOS Fisher matrices and plot P(k).
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
from units import *
from mpi4py import MPI
import os
import euclid
cosmo = rf.experiments.cosmo
names = ['SKAMID_PLUS', 'SKAMID_PLUS2', 'iSKAMID_PLUS', 'iSKAMID_PLUS2']
colours = ['#CC0000', '#1619A1', '#5B9C0A', '#990A9C'] # DETF/F/M/S
labels = ['MID Band 1', 'MID Band 2', 'Int. MID Band 1', 'Int. MID Band 2']
linestyle = [[2, 4, 6, 4], [1,0], [8, 4], [3, 4]]
bins = [[2, 7, 11], [1, 6], [2, 7, 11], [1, 6]] # Which bins to plot
# Get f_bao(k) function
cosmo = rf.load_power_spectrum(cosmo, "cache_pk.dat", force_load=True)
fbao = cosmo['fbao']
# Fiducial value and plotting
P.subplot(111)
for k in range(len(names)):
root = "output/" + names[k]
# Load cosmo fns.
dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
zc, Hc, dAc, Dc, fc = dat
z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T
kc = np.genfromtxt(root+"-fisher-kc.dat").T
print 1420./(1.+zc)
# Load Fisher matrices as fn. of z
Nbins = zc.size
F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]
# EOS FISHER MATRIX
# Actually, (aperp, apar) are (D_A, H)
pnames = rf.load_param_names(root+"-fisher-full-0.dat")
ppk = rf.indices_for_param_names(pnames, 'pk*')
cmap = matplotlib.cm.Blues_r
for j in range(len(F_list))[::-1]:
F = F_list[j]
# Just do the simplest thing for P(k) and get 1/sqrt(F)
cov = np.sqrt(1. / np.diag(F)[ppk])
pk = cosmo['pk_nobao'](kc) * (1. + fbao(kc))
# Replace nan/inf values
cov[np.where(np.isnan(cov))] = 1e10
cov[np.where(np.isinf(cov))] = 1e10
# Line with fading colour
alpha = 1. - (0.9*j / len(F_list))
if j in bins[k]:
line = P.plot(kc, cov, color=colours[k], lw=2., alpha=alpha,
label=r"z=%2.2f ($\nu$=%d MHz)" % (zc[j], 1420./(1.+zc[j])))
print zc[j]
# Label for min/max redshifts
#N = kc.size
#if j == 0:
# P.annotate("z = %3.2f" % zc[j], xy=(kc[N/2+5], cov[N/2+5]),
# xytext=(65., -60.), fontsize='large',
# textcoords='offset points', ha='center', va='center',
# arrowprops={'width':1.8, 'color':'#1619A1', 'shrink':0.05} )
#if j == len(F_list) - 1:
# P.annotate("z = %3.2f" % zc[j], xy=(kc[N/2], cov[N/2]),
# xytext=(-65., 60.), fontsize='large',
# textcoords='offset points', ha='center', va='center',
# arrowprops={'width':1.8, 'color':'#1619A1', 'shrink':0.07} )
# Plot the summed constraint (over all z)
#F, lbls = rf.combined_fisher_matrix(F_list, expand=[], names=pnames, exclude=[])
#cov = np.sqrt(1. / np.diag(F)[ppk])
#pk = cosmo['pk_nobao'](kc) * (1. + fbao(kc))
# Replace nan/inf values
#cov[np.where(np.isnan(cov))] = 1e10
#cov[np.where(np.isinf(cov))] = 1e10
# Line with fading colour
#line = P.plot(kc, cov, color='k', lw=3.)
# Set custom linestyle
#line[0].set_dashes(linestyle[k])
P.xscale('log')
P.yscale('log')
P.xlim((2e-3, 3e0))
P.ylim((9e-4, 1e1))
P.legend(loc='lower left', prop={'size':'medium'}, frameon=False, ncol=2)
P.title("Germany 4 - 0 Portugal")
P.tick_params(axis='both', which='major', labelsize=20, size=8., width=1.5, pad=8.)
P.tick_params(axis='both', which='minor', labelsize=20, size=5., width=1.2)
P.xlabel(r"$k \,[\mathrm{Mpc}^{-1}]$", fontdict={'fontsize':'xx-large'}, labelpad=10.)
P.ylabel(r"$\Delta P / P$", fontdict={'fontsize':'xx-large'}, labelpad=10.)
P.tight_layout()
# Set size
P.gcf().set_size_inches(8.,6.)
#P.savefig('pub-dlogp-fnz.pdf', transparent=True) # 100
P.savefig('MID_constraints.pdf', transparent=True)
P.show()
| 1.953125 | 2 |
evernode/classes/session.py | AtomHash/evernode | 1 | 12759532 | """
Static methods to help handle state-less app sessions
"""
from flask import current_app, g
from ..classes.security import Security
from ..models.session_model import SessionModel
class Session:
    """ Helper class for app state-less sessions """

    @staticmethod
    def create_session_id() -> str:
        """ Create a session token """
        return Security.generate_uuid(2)

    @staticmethod
    def set_current_session(session_id) -> bool:
        """ Add session_id to flask globals for current request """
        try:
            g.session_id = session_id
            return True
        except (Exception, BaseException) as error:
            # catch all on config update
            # NOTE(review): (Exception, BaseException) is a blanket catch-all;
            # confirm whether BaseException (KeyboardInterrupt, SystemExit)
            # really should be swallowed here.
            if current_app.config['DEBUG']:
                print(error)
            return False

    @staticmethod
    def current_session() -> str:
        """ Return session for the id in app globals, only current request.

        NOTE(review): despite the ``-> str`` annotation this returns the
        SessionModel row (or None), not a string — verify callers.
        """
        session_id = getattr(g, 'session_id', None)
        if session_id is not None:
            return SessionModel.where_session_id(session_id)
        return None

    @classmethod
    def create_session(cls, session_id, user_id):
        """
        Save a new session to the database
        Using the ['AUTH']['MAX_SESSIONS'] config setting
        a session with be created within the MAX_SESSIONS
        limit. Once this limit is hit, delete the earliest
        session.
        """
        count = SessionModel.count(user_id)
        if count < current_app.config['AUTH']['MAX_SESSIONS']:
            cls.__save_session(session_id, user_id)
            return
        elif count >= current_app.config['AUTH']['MAX_SESSIONS']:
            # At the cap: evict the oldest session before saving the new one.
            earliest_session = SessionModel.where_earliest(user_id)
            earliest_session.delete()
            cls.__save_session(session_id, user_id)
            return

    @classmethod
    def __save_session(cls, session_id, user_id):
        # Persist the session row and publish its id to the current request.
        session = SessionModel()
        session.user_id = user_id
        session.session_id = session_id
        Session.set_current_session(session_id)
        session.save()
| 3.09375 | 3 |
paperclip/routes/main.py | radoslawsobieralski/paperclip | 1 | 12759533 | <reponame>radoslawsobieralski/paperclip
from datetime import datetime
from flask import Blueprint, render_template, request, redirect, url_for, flash
from flask_login import login_required, current_user
from sqlalchemy import asc, desc
from paperclip.models import Invoice, Currency, Payment, Category
from paperclip.extensions import db
main = Blueprint("main", __name__)
@main.route("/", methods=["GET", "POST"])
@login_required
def index():
    """Render the dashboard: recent invoices plus monthly/yearly summaries."""
    this_month = datetime.today().month
    months = [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ]
    # Rotate so the list ends with the current month.
    last_12_months = months[this_month:] + months[:this_month]
    last_6_months = last_12_months[-6:]

    invoices = Invoice.query.all()
    invoices_newest_10_desc = Invoice.query.order_by(desc(Invoice.date)).limit(10).all()
    currencies = Currency.query.all()
    payments = Payment.query.all()
    categories = Category.query.all()

    costs_by_month = Invoice.get_costs_by_month()
    costs_by_year = Invoice.get_costs_by_year()
    invoices_by_month = Invoice.get_invoices_by_month()
    invoice_by_year = Invoice.get_invoices_by_year()
    costs_per_month = Invoice.costs_per_month()
    costs_per_year = Invoice.costs_per_year()
    invoices_per_month = Invoice.invoices_per_month()
    invoices_per_year = Invoice.invoices_per_year()
    # NOTE(review): the chart array reads index [1] here while
    # "invoices_last_month" below reads index [2] of the same rows —
    # confirm against the model's row schema that both are intended.
    invoices_monthly_array = [invoice[1] for invoice in invoices_by_month[-12:]]
    costs_monthly_array = [round(cost[2], 2) for cost in costs_by_month[-12:]]

    context = {
        "invoices": invoices,
        "currencies": currencies,
        "payments": payments,
        "categories": categories,
        "invoices_newest_10_desc": invoices_newest_10_desc,
        "costs_last_month": costs_by_month[-1][2],
        "costs_last_year": costs_by_year[-1][1],
        "invoices_last_month": invoices_by_month[-1][2],
        "invoices_last_year": invoice_by_year[-1][1],
        "current_user": current_user,
        "costs_per_month": costs_per_month,
        "costs_per_year": costs_per_year,
        "invoices_per_month": invoices_per_month,
        "invoices_per_year": invoices_per_year,
        "last_12_months": last_12_months,
        "last_6_months": last_6_months,
        "invoices_monthly_array": invoices_monthly_array[-6:],
        "costs_monthly_array": costs_monthly_array[-6:],
    }
    return render_template("index.html", **context)
@main.route("/invoices", methods=["GET", "POST"])
@login_required
def invoices():
    """Render the invoice list page, newest invoices first."""
    return render_template(
        "invoices.html",
        invoices=Invoice.query.order_by(desc(Invoice.id)).all(),
        currencies=Currency.query.all(),
        payments=Payment.query.all(),
        categories=Category.query.all(),
    )
@main.route("/add_invoice", methods=["GET", "POST"])
@login_required
def add_invoice():
    """Create a new invoice from the submitted form and return to the list.

    The redirect is issued unconditionally so a plain GET visit also gets a
    valid response — a Flask view must never return ``None``.
    """
    if request.method == "POST":
        form = request.form
        invoice = Invoice(
            date=datetime.strptime(form["date"], "%Y-%m-%d"),
            name=form["name"],
            value=form["value"],
            currency=form["currency"],
            payment=form["payment"],
            category=form["category"],
            description=form["description"],
        )
        db.session.add(invoice)
        db.session.commit()
        flash("Invoice added successfully!")
    return redirect(url_for("main.invoices"))
@main.route("/edit_invoice", methods=["GET", "POST"])
@login_required
def edit_invoice():
    """Update an existing invoice (looked up by the form's ``id``) in place."""
    if request.method == "POST":
        invoice = Invoice.query.get(request.form.get("id"))
        invoice.date = datetime.strptime(request.form["date"], "%Y-%m-%d")
        invoice.name = request.form["name"]
        invoice.value = request.form["value"]
        invoice.currency = request.form["currency"]
        invoice.payment = request.form["payment"]
        invoice.category = request.form["category"]
        invoice.description = request.form["description"]
        db.session.commit()
        flash("Invoice edited successfully!")
    # Unconditional redirect: GET requests must not fall through to None.
    return redirect(url_for("main.invoices"))
@main.route("/delete_invoice", methods=["GET", "POST"])
@login_required
def delete_invoice():
    """Delete the invoice identified by the form's ``id``."""
    if request.method == "POST":
        invoice = Invoice.query.get(request.form.get("id"))
        db.session.delete(invoice)
        db.session.commit()
        flash("Invoice deleted successfully!")
    # Unconditional redirect: GET requests must not fall through to None.
    return redirect(url_for("main.invoices"))
@main.route("/reports")
@login_required
def reports():
    """Render the static reports page."""
    return render_template("reports.html")
| 2.21875 | 2 |
prob53.py | Ziggareto/project_euler_solns | 0 | 12759534 | def fact(n):
if n == 0:
return(1)
return(n*fact(n-1))
def ncr(n, r):
return(fact(n)/(fact(r)*fact(n-r)))
# Project Euler 53: count values of C(n, r), 1 <= n <= 100, exceeding 1e6.
million = 1000000
n = 0
a = 0
comp = 0
# Scan n from 100 downward; by the symmetry C(n, r) = C(n, n-r), once the
# first r with C(n, r) > million is found, n-2r+1 entries of that row
# qualify.  ``a = r-1`` narrows the search window for the next (smaller) n,
# since rows shrink monotonically.
for n in range(100, 0, -1):
    for r in range(a, n):
        if ncr(n,r) > million:
            comp += n-2*r + 1
            a = r-1
            break
# Progress/result output (expected comp: 4075).
print(n)
print("comp=" + str(comp))
| 3.25 | 3 |
envs/simple_gridworld_env.py | petros94/monte-carlo-gridworld | 0 | 12759535 | <filename>envs/simple_gridworld_env.py
import random
import gym
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use("TkAgg")
# Interactive plotting so render() can update the figure without blocking.
plt.ion()

# 90-degree rotation matrices used to perturb moves (clockwise / counter-cw).
rotate_cw = np.matrix([[0, -1], [1, 0]])
rotate_ccw = np.matrix([[0, 1], [-1, 0]])
class SimpleGridworldEnv(gym.Env):
    """The classic stochastic 3x4 gridworld (Russell & Norvig style layout —
    presumably; verify): goal at (0,3) reward +1, trap at (1,3) reward -1,
    wall at (1,1), step cost -0.04.  Intended moves succeed with p=0.8 and
    slip 90 degrees left/right with p=0.1 each.
    """

    def __init__(self):
        self.height = 3
        self.width = 4
        # Action id -> (row, col) displacement.
        self.moves = {
            0: (-1, 0), # up
            1: (0, 1), # right
            2: (1, 0), # down
            3: (0, -1), # left
        }

        # initialize plot
        self.init_plot()

        # begin in start state
        self.reset()

    def step(self, action):
        """Apply ``action``; return (state, reward, done, info)."""
        movement_vector = np.array(self.moves[action])

        # Stochastic environment: 10% slip clockwise, 10% counter-clockwise.
        p = random.uniform(0, 1)
        if 0.8 < p <= 0.9:
            movement_vector = np.array(movement_vector*rotate_cw)[0]
        elif 0.9 < p:
            movement_vector = np.array(movement_vector*rotate_ccw)[0]

        s_ = self.s + movement_vector

        # position (1,1) is blocked: only move if the target differs from it
        if (s_ != (1, 1)).any():
            self.s = s_

        # stay inside map (clamp row/col to the grid bounds)
        self.s = max(0, self.s[0]), max(0, self.s[1])
        self.s = (min(self.s[0], self.height - 1),
                  min(self.s[1], self.width - 1))

        # check for terminal states
        if self.s == (0, 3):
            return self.s, 1, True, {}
        elif self.s == (1, 3):
            return self.s, -1, True, {}

        return self.s, -0.04, False, {}

    def reset(self):
        """Reset the agent to the start cell (2,0) and redraw the board."""
        self.s = (2, 0)
        self.render()
        return self.s

    def render(self, mode="human"):
        """Redraw the grid: white=free, black=wall, green=goal, red=trap,
        blue=agent."""
        black = [0, 0, 0]
        white = [255, 255, 255]
        red = [255, 0, 0]
        green = [0, 255, 0]
        blue = [0, 0, 255]
        world = [[white, white, white, green],
                 [white, black, white, red],
                 [white, white, white, white]]
        y, x = self.s
        world[y][x] = blue
        self.im.set_data(world)
        self.fig.canvas.draw_idle()
        plt.pause(0.001)

    def init_plot(self):
        """Create the matplotlib figure/image used by render()."""
        black = [0, 0, 0]
        white = [255, 255, 255]
        red = [255, 0, 0]
        green = [0, 255, 0]
        world = np.array([[white, white, white, green],
                          [white, black, white, red],
                          [white, white, white, white]])
        self.fig, self.ax = plt.subplots(1, 1)
        self.im = self.ax.imshow(world)
        plt.show()
| 2.984375 | 3 |
npktest.py | lqez/pynpk | 5 | 12759536 | from pytest import raises
import npk
# Test fixtures: a pre-built encrypted package, its plaintext source file,
# and the 4-part key the package was created with.
filename = "testres/sample.npk"
sample = "testres/sample.txt"
key = (98521, 16322, 7163, 992)
class TestNpk(object):
    """pytest suite for the npk package wrapper (open/create/iterate/export)."""

    def test_open_package(self):
        # Opening the fixture package with the correct key must succeed.
        pack = npk.package(filename, key)
        pack.close()

    def test_create_package(self):
        # A new package can be built from a plain file and saved to disk.
        pack = npk.package()
        pack.add(sample)
        pack.save("test.npk")
        pack.close()

    def test_open_package_fail(self):
        # A wrong key (here: the right key reversed) must be rejected.
        with raises(npk.FailToOpenPackage):
            npk.package(filename, reversed(key))

    def test_iterate_entities(self):
        pack = npk.package(filename, key)
        entities = pack.all()
        entities_expected = ['sample.txt', 'tea.txt', 'zip.txt', 'zipntea.txt']
        assert len(entities) == 4
        assert set(sorted([str(x) for x in entities])) == set(sorted(entities_expected))
        pack.close()

    def test_get_entity(self):
        # Every entity decodes back to the original plaintext fixture.
        pack = npk.package(filename, key)
        for entity in pack.all():
            assert entity.read() == open(sample).read()
        pack.close()

    def test_export_entity(self, tmpdir):
        # export() writes a byte-identical copy of each entity to disk.
        pack = npk.package(filename, key)
        for entity in pack.all():
            export_filename = str(tmpdir.join(entity.name()))
            entity.export(export_filename)
            assert open(export_filename).read() == open(sample).read()
        pack.close()

    def test_get_entity_fail(self):
        # Looking up a missing entity raises EntityNotFound.
        pack = npk.package(filename, key)
        with raises(npk.EntityNotFound):
            pack.get("notfound.42")
        pack.close()
| 2.359375 | 2 |
src/sig_load.py | wykys/MIKS-FSK | 0 | 12759537 | #!/usr/bin/env python3
# wykys 2019
from numpy import ndarray
from awgn import awgn
WAV_PATH = '../wav/'
def wav(path: str, snr_db: float = None) -> tuple:
    """Load a WAV file from ``WAV_PATH``, optionally add AWGN, and normalize.

    Parameters
    ----------
    path : str
        File name relative to ``WAV_PATH``.
    snr_db : float, optional
        If given, additive white Gaussian noise at this SNR (in dB) is
        applied before normalization.

    Returns
    -------
    (fs, s) : tuple
        Sample rate in Hz and the signal scaled into [-1, 1].
        (The original annotation said ``list`` but a tuple is returned.)
    """
    from scipy.io import wavfile
    fs, s = wavfile.read(WAV_PATH + path)
    if snr_db is not None:
        s = awgn(s, snr_db)
    # Fix: normalize by the absolute peak.  The original divided by
    # ``s.max()``, which leaves samples below -1 when the negative peak is
    # larger, and divides by zero (or a negative value) for silent or
    # all-negative signals.
    peak = abs(s).max()
    if peak > 0:
        s = s / peak
    return fs, s
| 2.6875 | 3 |
logdevice/ops/ldops/util/tests/test_helpers.py | dmitris/LogDevice | 0 | 12759538 | <filename>logdevice/ops/ldops/util/tests/test_helpers.py
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Unit tests for `ldops.util.helpers` module.
"""
from unittest import TestCase
from ldops.const import ALL_SHARDS
from ldops.util import helpers
from logdevice.admin.common.types import NodeID, ShardID
class HelpersTestCase(TestCase):
    """Unit tests for ``ldops.util.helpers.parse_shards``."""

    def test_parse_shards_empty(self) -> None:
        """
        Empty input should produce empty output
        """
        self.assertEqual(set(), helpers.parse_shards([]))

    def test_parse_shards_invalid(self) -> None:
        """
        Invalid shards should throw ValueError exceptions.
        """
        # N0:S1, N0, or 0 format should be accepted
        with self.assertRaises(ValueError):
            print(helpers.parse_shards([":S2"]))

        with self.assertRaises(ValueError):
            print(helpers.parse_shards(["N:S2"]))

        with self.assertRaises(ValueError):
            print(helpers.parse_shards(["X0"]))

        with self.assertRaises(ValueError):
            helpers.parse_shards(["N0:B1"])

        with self.assertRaises(ValueError):
            helpers.parse_shards(["N0:S1X"])

    def test_parse_shards_valid1(self) -> None:
        """Every accepted single-shard spelling parses to the right ShardID."""
        # 5
        self.assertEqual(
            {ShardID(node=NodeID(node_index=5), shard_index=ALL_SHARDS)},
            helpers.parse_shards(["5"]),
        )

        # 5:1
        self.assertEqual(
            {ShardID(node=NodeID(node_index=5), shard_index=1)},
            helpers.parse_shards(["5:1"]),
        )

        # 0:S1
        self.assertEqual(
            {ShardID(node=NodeID(node_index=0), shard_index=1)},
            helpers.parse_shards(["0:S1"]),
        )

        # N0:S1
        self.assertEqual(
            {ShardID(node=NodeID(node_index=0), shard_index=1)},
            helpers.parse_shards(["N0:S1"]),
        )

        # N0 == ShardID(0, ALL_SHARDS)
        self.assertEqual(
            {ShardID(node=NodeID(node_index=0), shard_index=ALL_SHARDS)},
            helpers.parse_shards(["N0"]),
        )

        # N1:S4 == ShardID(1, 4)
        self.assertEqual(
            {ShardID(node=NodeID(node_index=1), shard_index=4)},
            helpers.parse_shards(["N1:S4"]),
        )

        # Allow ignored case
        # n1:S4 == ShardID(1, 4)
        self.assertEqual(
            {ShardID(node=NodeID(node_index=1), shard_index=4)},
            helpers.parse_shards(["n1:S4"]),
        )

    def test_parse_shards_valid2(self) -> None:
        """Multiple inputs are merged into one set, with duplicates removed."""
        # Parse multiple inputs
        self.assertEqual(
            {
                ShardID(node=NodeID(node_index=0), shard_index=1),
                ShardID(node=NodeID(node_index=1), shard_index=2),
            },
            helpers.parse_shards(["N0:S1", "N1:S2"]),
        )

        # Remove duplicates (case-insensitively: "N0:s1" == "N0:S1")
        self.assertEqual(
            {
                ShardID(node=NodeID(node_index=0), shard_index=1),
                ShardID(node=NodeID(node_index=1), shard_index=2),
            },
            helpers.parse_shards(["N0:S1", "N1:S2", "N0:s1"]),
        )
| 2.484375 | 2 |
slise/__init__.py | vishalbelsare/pyslise | 3 | 12759539 | """
__ SLISE - Sparse Linear Subset Explanations __
The SLISE algorithm can be used for both robust regression and to explain outcomes from black box models.
In robust regression we fit regression models that can handle data that
contains outliers. SLISE accomplishes this by fitting a model such that
the largest possible subset of the data items have an error less than a
given value. All items with an error larger than that are considered
potential outliers and do not affect the resulting model.
SLISE can also be used to provide local model-agnostic explanations for
outcomes from black box models. To do this we replace the ground truth
response vector with the predictions from the complex model. Furthermore, we
force the model to fit a selected item (making the explanation local). This
gives us a local approximation of the complex model with a simpler linear
model. In contrast to other methods SLISE creates explanations using real
data (not some discretised and randomly sampled data) so we can be sure that
all inputs are valid (i.e. in the correct data manifold, and follows the
constraints used to generate the data, e.g., the laws of physics).
More in-depth details about the algorithm can be found in the paper:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>.
Sparse Robust Regression for Explaining Classifiers.
Discovery Science (DS 2019).
Lecture Notes in Computer Science, vol 11828, Springer.
https://doi.org/10.1007/978-3-030-33778-0_27
"""
from slise.slise import SliseRegression, regression, SliseExplainer, explain
from slise.utils import limited_logit as logit
from slise.data import normalise_robust
| 3.28125 | 3 |
anim_utils/utilities/log.py | jsprenger2/anim_utils | 10 | 12759540 | <filename>anim_utils/utilities/log.py
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
# Verbosity levels accepted by set_log_mode() / write_message_to_log().
LOG_MODE_ERROR = -1
LOG_MODE_INFO = 1
LOG_MODE_DEBUG = 2
# Module-level state: collected log lines, on/off switch, current verbosity.
_lines = []
_active = True
_mode = LOG_MODE_INFO
def activate():
    """Turn logging output and line collection on."""
    global _active
    _active = True
def deactivate():
    """Turn logging output and line collection off."""
    global _active
    _active = False
def set_log_mode(mode):
    """Set the global verbosity threshold (one of the LOG_MODE_* values)."""
    global _mode
    _mode = mode
def write_log(*args):
    """Join *args* with spaces, print the line, and record it (when active)."""
    global _active
    global _lines
    if not _active:
        return
    line = " ".join(str(arg) for arg in args)
    print(line)
    _lines.append(line)
def write_message_to_log(message, mode=LOG_MODE_INFO):
    """Print and record *message* when logging is active and the current
    verbosity (_mode) is at least *mode*."""
    global _active
    global _lines
    if not (_active and _mode >= mode):
        return
    print(message)
    _lines.append(message)
def save_log(filename):
    """Write all collected log lines to *filename*, one per line.

    Bug fix: the file was opened in binary mode ("wb") while ``str`` lines
    were written, which raises TypeError on Python 3. Open in text mode
    instead.
    """
    global _lines
    with open(filename, "w") as outfile:
        for line in _lines:
            outfile.write(line + "\n")
def clear_log():
    """Forget every recorded log line."""
    global _lines
    _lines = []
| 1.9375 | 2 |
get_lists.py | StupidHackathonAMS/bestaat-mijn-gemeente-nog | 0 | 12759541 | <reponame>StupidHackathonAMS/bestaat-mijn-gemeente-nog<filename>get_lists.py
#!/usr/bin/env python
import os
import sys
import re
from pprint import pprint
import json
import requests
from lxml import etree
def get_possible_lists():
    """Return every link (href attribute) found on the CBS OData feed index page."""
    page = requests.get('https://dataderden.cbs.nl/ODataFeed/').content
    return etree.HTML(page).xpath('//a/@href')
def get_gemeenten(link):
    """Fetch the 'Gemeenten' listing for a dataset *link* as JSON.

    Returns None (implicitly) for non-2xx responses.
    """
    url = 'https://dataderden.cbs.nl%s/Gemeenten?$format=json' % (link,)
    resp = requests.get(url)
    if 200 <= resp.status_code < 300:
        return resp.json()
def get_year(link):
    """Return the 'Period' field of the dataset's first TableInfos record.

    Example endpoint:
    https://dataderden.cbs.nl/ODataFeed/OData/45001NED/TableInfos?$format=json
    Returns None (implicitly) for non-2xx responses.
    """
    url = 'https://dataderden.cbs.nl%s/TableInfos?$format=json' % (link,)
    resp = requests.get(url)
    if 200 <= resp.status_code < 300:
        return resp.json()['value'][0]['Period']
def main(argv):
    """Collect per-year 'Gemeenten' data from CBS and dump each year to
    cache/<year>.json. Returns 0 on success.

    Bug fix: get_year() returns None when the HTTP request fails, and
    ``int(None)`` raises TypeError — which the old ``except ValueError``
    did not catch, crashing the loop. Catch both.
    """
    yearly = {}
    links = get_possible_lists()
    for link in links:
        result = get_gemeenten(link)
        if result is not None:
            year_str = get_year(link)
            try:
                year = int(year_str)
            except (TypeError, ValueError):
                year = None
            if year is not None:
                yearly[year] = result
    for year in sorted(yearly.keys()):
        print(year)
        with open('cache/%s.json' % (year,), 'w') as out_file:
            json.dump(yearly[year], out_file)
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| 2.8125 | 3 |
Movement.py | kclark-jenkins/connorbot-server | 0 | 12759542 | <reponame>kclark-jenkins/connorbot-server
class Movement:
    """Placeholder robot-movement controller.

    NOTE(review): this module uses Python 2 print statements and will not
    run under Python 3 without porting.
    """
    def __init__(self):
        print "Movement constructor"
    def move(self, direction, speed):
        # TODO: stub — the returned string documents the missing implementation.
        return "Implement motor code from pi prototype"
lambda/bibot_config.py | bhupeshwar/bp-amazon-lex-bi-bot | 0 | 12759543 | <gh_stars>0
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Markers for SLOT_CONFIG['type']: use the caller's original utterance value,
# or the top entry from the slot's resolution list.
ORIGINAL_VALUE = 0
TOP_RESOLUTION = 1
# Per-slot handling: how to read the value, whether it persists across turns
# ('remember'), and the error template for unresolvable TOP_RESOLUTION slots.
SLOT_CONFIG = {
    'event_name': {'type': TOP_RESOLUTION, 'remember': True, 'error': 'I couldn\'t find an event called "{}".'},
    'event_month': {'type': ORIGINAL_VALUE, 'remember': True},
    'venue_name': {'type': ORIGINAL_VALUE, 'remember': True},
    'venue_city': {'type': ORIGINAL_VALUE, 'remember': True},
    'venue_state': {'type': ORIGINAL_VALUE, 'remember': True},
    'cat_desc': {'type': TOP_RESOLUTION, 'remember': True, 'error': 'I couldn\'t find a category called "{}".'},
    'count': {'type': ORIGINAL_VALUE, 'remember': True},
    'dimension': {'type': ORIGINAL_VALUE, 'remember': True},
    'one_event': {'type': TOP_RESOLUTION, 'remember': False, 'error': 'I couldn\'t find an event called "{}".'},
    'another_event': {'type': TOP_RESOLUTION, 'remember': False, 'error': 'I couldn\'t find an event called "{}".'},
    'one_venue': {'type': ORIGINAL_VALUE, 'remember': False},
    'another_venue': {'type': ORIGINAL_VALUE, 'remember': False},
    'one_month': {'type': ORIGINAL_VALUE, 'remember': False},
    'another_month': {'type': ORIGINAL_VALUE, 'remember': False},
    'one_city': {'type': ORIGINAL_VALUE, 'remember': False},
    'another_city': {'type': ORIGINAL_VALUE, 'remember': False},
    'one_state': {'type': ORIGINAL_VALUE, 'remember': False},
    'another_state': {'type': ORIGINAL_VALUE, 'remember': False},
    'one_category': {'type': TOP_RESOLUTION, 'remember': False, 'error': 'I couldn\'t find a category called "{}".'},
    'another_category': {'type': TOP_RESOLUTION, 'remember': False, 'error': 'I couldn\'t find a category called "{}".'}
}
# Maps a plural dimension name to the slot holding its value, the
# (presumably SQL) column expression it queries, and the singular word
# used in responses.
DIMENSIONS = {
    'events': {'slot': 'event_name', 'column': 'e.event_name', 'singular': 'event'},
    'months': {'slot': 'event_month', 'column': 'd.month', 'singular': 'month'},
    'venues': {'slot': 'venue_name', 'column': 'v.venue_name', 'singular': 'venue'},
    'cities': {'slot': 'venue_city', 'column': 'v.venue_city', 'singular': 'city'},
    'states': {'slot': 'venue_state', 'column': 'v.venue_state', 'singular': 'state'},
    'categories': {'slot': 'cat_desc', 'column': 'c.cat_desc', 'singular': 'category'}
}
class SlotError(Exception):
    """Signals a slot-handling failure (raising sites are outside this module)."""
    pass
| 1.585938 | 2 |
sydent/http/servlets/hashdetailsservlet.py | callahad/sydent | 0 | 12759544 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from twisted.web.resource import Resource
from sydent.http.auth import authV2
import logging
from sydent.http.servlets import jsonwrap, send_cors
logger = logging.getLogger(__name__)
class HashDetailsServlet(Resource):
    isLeaf = True
    # Hash algorithms this identity server advertises for hashed lookups.
    known_algorithms = ["sha256", "none"]
    def __init__(self, syd, lookup_pepper):
        # syd: the Sydent instance; lookup_pepper: server-defined value the
        # client must include in 3PID data before hashing (see render_GET).
        self.sydent = syd
        self.lookup_pepper = lookup_pepper
    @jsonwrap
    def render_GET(self, request):
        """
        Return the hashing algorithms and pepper that this IS supports. The
        pepper included in the response is stored in the database, or
        otherwise generated.
        Returns: An object containing an array of hashing algorithms the
            server supports, and a `lookup_pepper` field, which is a
            server-defined value that the client should include in the 3PID
            information before hashing.
        """
        send_cors(request)
        authV2(self.sydent, request)
        return {
            "algorithms": self.known_algorithms,
            "lookup_pepper": self.lookup_pepper,
        }
    def render_OPTIONS(self, request):
        # CORS preflight: set the headers and return an empty body.
        send_cors(request)
        return b""
| 1.96875 | 2 |
test.py | saturn-drm/PythonMDtoHTML | 0 | 12759545 | #%%
from frontmatter import Frontmatter
import markdown
# Scratchpad cells: convert a Markdown file (with frontmatter) to HTML.
fp = 'testtemplate.md'
# %%
head_body = Frontmatter.read_file(fp)
type(head_body['body'])
# %%
bodyhtml = markdown.markdown(head_body['body'], extensions=['toc', 'tables','fenced_code'])
# bodyhtml = markdown.markdown(head_body['body'], extensions=['toc', 'tables','fenced_code', 'codehilite'])
bodyhtml
# %%
# Write the converted body out to test.html.
ofp = 'test.html'
of = open(ofp,'w',encoding='utf-8',errors='xmlcharrefreplace')
of.write(bodyhtml)
of.close()
# %%
md = markdown.Markdown(extensions=['toc', 'tables','fenced_code'])
# need fenced_code here too
# %%
bodytoc = md.convert(head_body['body'])
# bodytoc
bodyhtml == bodytoc
# %%
md.toc
# %%
# Prepend the generated table of contents to the HTML file.
with open('test.html','r+',encoding='utf-8',errors='xmlcharrefreplace') as f:
    old = f.read()
    f.seek(0)
    f.write(md.toc)
    f.write(old)
    f.close()  # NOTE(review): redundant — the `with` block already closes f.
#%%
# BeautifulSoup experiments: editing titles and divs in a template.
from bs4 import BeautifulSoup
htmlfp = '../saturn-drmtest.github.io/layout/articletest.html'
soup = BeautifulSoup(open(htmlfp).read(), "html.parser")
soup.title
# %%
type(soup.title.string)
# %%
soup.title.string = 'new title'
soup.title
# %%
soup = BeautifulSoup('<div id="content"></div>', "html.parser")
targetdiv = soup.find(id='content')
targetdiv.insert(0, tempcontent[1])  # NOTE(review): `tempcontent` is undefined here; this cell raises NameError.
targetdiv
# %%
html = '''
<div id="offsetheader">
<img src="/assets/img/covers/codingcover.jpg"/>
</div>
'''
# NOTE(review): "assests" below looks misspelled in the path string — confirm intended.
headImgSrc = '/assests/img/covers/architecturecover.jpg'
soup = BeautifulSoup(html, "html.parser")
targetDiv = soup.find(id='offsetheader')
targetDiv.img['src'] = headImgSrc
targetDiv
# %%
import os
class filepaths():
    """Collect source files under a tree and compute their HTML output paths.

    Attribute/method names are kept as-is for existing callers:
    path, fileList, validFileList, desFileDict, desfolder,
    getFiles(), validFiles(), getFilePaths(), getValidFileNames().
    """
    def __init__(self, orifp, desfolder):
        self.path = orifp
        self.fileList = []
        self.validFileList = []
        self.desFileDict = {}
        self.desfolder = desfolder
    def getFiles(self):
        """Walk self.path and record the full path of every file found."""
        for dirpath, _dirnames, filenames in os.walk(self.path):
            for name in filenames:
                self.fileList.append(os.path.join(dirpath, name))
    def validFiles(self):
        """Keep files that are not .DS_Store and whose HTML output is absent."""
        for src in self.fileList:
            base = os.path.basename(src)
            if base == '.DS_Store':
                continue
            # NOTE(review): assumes self.path is exactly three '/'-separated
            # segments deep; the [3:] slice strips them to get the
            # sub-folder-relative name — confirm for other layouts.
            relative = '/'.join(src.split('/')[3:])
            html_name = os.path.splitext(relative)[0] + '.html'
            destination = os.path.join(self.desfolder, html_name)
            if os.path.exists(destination):
                continue
            self.validFileList.append(src)
            self.desFileDict[src] = destination
    def getFilePaths(self):
        return self.fileList
    def getValidFileNames(self):
        return self.validFileList
# Exercise the filepaths helper against the site's posts tree.
filepathclass = filepaths('../saturn-drmtest.github.io/posts', '../saturn-drmtest.github.io/postshtml')
filepathclass.getFiles()
filepathclass.validFiles()
# filepathclass.validFileList
dic = filepathclass.desFileDict
# %%
# Manual walk-through of the path math used in filepaths.validFiles().
clearFileName = os.path.basename('../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md')
clearFileName
# %%
subfolderFileName = '/'.join('../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md'.split('/')[3:])
subfolderFileName
# %%
htmlBaseName = os.path.splitext(clearFileName)[0] + '.html'
htmlBaseName
# %%
os.path.exists(os.path.join('../saturn-drmtest.github.io/postshtml', htmlBaseName))
# %%
print('Converting %s' % os.path.basename('../saturn-drmtest.github.io/posts/01blog/01digest/2020-01-26-资治通鉴.md'))
# %%
def insertDiv(modifiedSoup, id=''):
    # Replace the contents of the element with the given id in the global
    # `soup` with `modifiedSoup`. (Parameter shadows the `id` builtin.)
    targetDiv = soup.find(id=id)
    targetDiv.clear()
    targetDiv.insert(0, modifiedSoup)
htmltxt = '''
<h1 class="anchor" id="head1">head1</h1>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et
dolore
magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit
anim id est laborum.</p>
<h2 id="subhead1">subhead1</h2>
<p>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et
dolore
magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt
mollit
anim id est laborum.</p>
'''
htmlfp = '../saturn-drmtest.github.io/layout/article.html'
soup = BeautifulSoup(open(htmlfp).read(), "html.parser")
insertDiv(BeautifulSoup(htmltxt, 'html.parser'), id='content')
soup
# %%
from bs4 import BeautifulSoup
# NOTE(review): no parser argument below — bs4 warns and the chosen parser
# (and thus output) can vary by environment.
soup = BeautifulSoup("<b>stop</b>")
tag = soup.new_tag('h1')
tag.string = "Don't"
soup.find('b').string.insert_before(tag)
soup.b
# %%
html = '''
<div id="content">
<div id="post"><h1>title</h1></div>
</div>
'''
soup = BeautifulSoup(html, 'html.parser')
tag = soup.new_tag('h1')
tag.string = 'title2'
targettag = soup.find(id='post')
targettag.insert(0, tag)
soup
# %%
| 2.640625 | 3 |
back-end/RawFishSheep/app_order/views.py | Coldarra/RawFishSheep | 0 | 12759546 | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.hashers import make_password, check_password
import datetime
import time
import random
from decorator import *
from app_cart.views import *
from .models import *
def getOrderByUser(user_id=None, mode="all"):
    """Return a user's non-deleted orders filtered by completion mode.

    mode: "all" (default, newest first), "unfinished" (in-flight states)
    or "finished" (confirmed only).

    Raises ParamException for a missing user_id or an unknown mode.
    Bug fix: previously an unknown mode left ``orders`` unbound and the
    final ``return orders`` raised UnboundLocalError.
    """
    if user_id is None:
        raise ParamException()
    if mode == "all":
        return Order.objects.filter(
            user_id=user_id, isdelete='0').order_by("-id")
    if mode == "unfinished":
        return Order.objects.filter(
            user_id=user_id, isdelete='0',
            status__in=['processing', 'examining', 'preparing', 'delivering', 'delivered'])
    if mode == "finished":
        return Order.objects.filter(
            user_id=user_id, isdelete='0', status='confirmed')
    raise ParamException()
def getOrderByMode(mode=None):
    """Return non-deleted orders in the given status, or all of them.

    Raises ParamException for a missing or unknown mode.
    Cleanup: removed a leftover debug ``print(mode)`` call.
    """
    if mode is None:
        raise ParamException()
    if mode not in ['unprocessed', 'processing', 'examining', 'preparing',
                    'delivering', 'delivered', 'confirmed', 'all']:
        raise ParamException()
    if mode == "all":
        return Order.objects.filter(isdelete='0')
    return Order.objects.filter(isdelete='0', status=mode)
def getOrderByID(order_id=None, serialnumber=None):
    """Fetch a single non-deleted order by id or by serial number.

    order_id takes precedence when both are given (truthiness check).
    Raises ParamException if neither is usable, and RFSException('50012')
    (error text: "invalid order") when no unique match exists.
    """
    if order_id == None and serialnumber == None:
        raise ParamException()
    if order_id:
        if Order.objects.filter(isdelete='0', id=order_id).count() == 1:
            return Order.objects.get(isdelete='0', id=order_id)
        raise RFSException('50012', '无效订单')
    elif serialnumber:
        if Order.objects.filter(isdelete='0', serialnumber=serialnumber).count() == 1:
            return Order.objects.get(isdelete='0', serialnumber=serialnumber)
        raise RFSException('50012', '无效订单')
    raise ParamException()
def createOrder(user_id=None, discount=1, paymentname=None, address_id=None):
    """Create an order (plus detail rows) from the user's selected cart items.

    Checks stock for every selected cart line, totals the discounted price,
    generates a timestamp-based serial number, creates the Order and one
    OrderDetail per line, then clears the selected cart entries.
    """
    if None in [user_id, discount, paymentname, address_id]:
        raise ParamException()
    carts = getSelectedCart(user_id)
    if len(carts) == 0:
        # Error text: "no goods selected or cart is empty".
        raise RFSException('50112', '未选择任何商品或购物车为空')
    totalprice = 0
    for cart in carts:
        if cart.goods.remain < cart.amount:
            # Error text: "insufficient stock for this item".
            raise RFSException('50111', '商品余量不足')
        totalprice = totalprice + cart.goods.price * cart.amount * discount
    # Create the order record; serial = timestamp + epoch-ms + 4 random digits.
    serialnumber = "{}{}{}".format(datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
                                   int(time.time()*1000),
                                   random.randint(1000, 9999))
    order = Order.objects.create(
        serialnumber=serialnumber,
        user_id=user_id,
        address_id=address_id,
        totalprice=int(totalprice),
        discount=discount,
        createtime=datetime.datetime.now(),
        paymentname=paymentname
    )
    # Create one order-detail row per cart line.
    for cart in carts:
        createOrderDetail(order.id, cart.goods_id,
                          cart.goods.price, cart.amount)
    deleteSelectedCart(user_id)
    return order
def paidOrder(order_id=None):
    # TODO: unimplemented stub; payment confirmation currently lives in paidConfirm().
    pass
def changeOrder(order_id=None, mode=None):
    """Advance an order one step through its status pipeline.

    ``mode`` must equal the order's *current* status; the order is then
    moved to the next state (unprocessed -> processing -> examining ->
    preparing -> delivering -> delivered -> confirmed). Any mismatch
    raises RFSException('50513') (error text: "illegal order status").
    Returns None.
    """
    if None in [order_id, mode]:
        raise ParamException()
    if mode not in ['processing', 'examining', 'preparing', 'delivering', 'delivered', 'unprocessed']:
        raise RFSException('50513', '订单状态非法')
    order = getOrderByID(order_id)
    # Verify the current status matches the requested transition.
    if order.status == 'processing' and mode == "processing":
        order.status = 'examining'
    elif order.status == 'examining' and mode == "examining":
        order.status = 'preparing'
    elif order.status == 'preparing' and mode == "preparing":
        order.status = 'delivering'
    elif order.status == 'delivering' and mode == "delivering":
        order.status = 'delivered'
    elif order.status == 'delivered' and mode == "delivered":
        order.status = 'confirmed'
    elif order.status == 'unprocessed' and mode == "unprocessed":
        order.status = 'processing'
    else:
        raise RFSException('50513', '订单状态非法')
    order.save()
def createOrderDetail(order_id=None, goods_id=None, price=None, amount=None):
    """Insert a single order-detail row; every argument is required."""
    required = (order_id, goods_id, price, amount)
    if None in required:
        raise ParamException()
    return OrderDetail.objects.create(
        order_id=order_id,
        goods_id=goods_id,
        price=int(price),
        amount=amount,
    )
def deleteOrder(order_id=None):
    """Soft-delete an order that is in a deletable status.

    Bug fix: the status check referenced an undefined name ``order_obj``
    (the local variable is ``order``), so every call raised NameError.
    Raises RFSException('50301') (error text: "delete failed") when the
    order's status does not allow deletion.
    """
    if order_id is None:
        raise ParamException()
    order = getOrderByID(order_id)
    if order.status in ['processing', 'examining', 'confirmed']:
        order.isdelete = '1'
        order.save()
    else:
        raise RFSException("50301", "删除失败")
def paidConfirm(order_id=None, serialnumber=None):
    """Mark an order as paid: unprocessed -> examining, recording paidtime.

    Raises RFSException("90101") (error text: "do not pay twice") when
    the order is not in 'unprocessed'. Returns the updated order.
    """
    order = getOrderByID(order_id, serialnumber)
    if order.status == "unprocessed":
        order.status = "examining"
        order.paidtime = datetime.datetime.now()
        order.save()
    else:
        raise RFSException("90101", "请勿重复支付")
    return order
| 2.078125 | 2 |
models/ev/gpq.py | muslax/aces-api | 0 | 12759547 | <reponame>muslax/aces-api
from typing import List
from models.base import BaseModel, DBModel, WithLicense, WithProject
from models.ev.evidence import EvidenceBase
class GPQRow(BaseModel):
    """One GPQ item as presented/answered within an evidence record."""
    seq: int  # item order within this evidence record
    wbSeq: int  # sequence number in the workbook ("nomer urut di workbook")
    element: str = None  # element symbol ("simbol elemen")
    statement: str = None  # item text, e.g. "Lorem ipsum..."
    saved: int = None  # time when record was saved
    elapsed: int = None  # elapsed time since previous touch event
class GPQRowUpdate(BaseModel):
    """Payload for updating a single GPQ row."""
    seq: int  # item order within the record
    wbSeq: int  # sequence number in the workbook
    total: int
    element: str
    statement: str
    lastTouch: int  # presumably the timestamp of the latest touch event — confirm
class GPQEvidenceCreate(EvidenceBase):
    """Creation payload for GPQ evidence: a sequence label plus its rows."""
    sequence: str
    # NOTE(review): the [] default is safe only if BaseModel is a pydantic
    # model (pydantic copies field defaults per instance) — confirm.
    rows: List[GPQRow] = []
# class GPQEvidence(EvidenceBase, WithProject, WithLicense, DBModel):
class GPQEvidence(GPQEvidenceCreate, DBModel):
    """Stored GPQ evidence: the creation payload plus DB bookkeeping fields."""
    pass
| 2.390625 | 2 |
reviewsapp/tvshows/models.py | xN03Lx/reviewsapp | 0 | 12759548 | from django.db import models
from reviewsapp.celebrities.models import Celebrity
from reviewsapp.core.behaviors import Filmable, Timestampable
class Network(models.Model):
    """A broadcast network that airs TV shows."""
    name = models.CharField(max_length=100)

    def __str__(self):
        # Match TvShow.__str__ so admin/listing pages show a readable label.
        return self.name
class TvShow(Filmable, Timestampable, models.Model):
    """A TV show.

    ``title`` is expected to come from the ``Filmable`` mixin (defined in
    core.behaviors, not in this module) — ``__str__`` relies on it.

    Bug fixes: ``models.ManytoMany`` is not a Django attribute (the field
    class is ``ManyToManyField``), and the through reference named a
    non-existent ``tvShowCastCrew`` model — the intermediate model defined
    below is ``Crew``.
    """
    premiere_date = models.DateField()
    # Cast/crew membership goes through the explicit Crew join model.
    cast_crew = models.ManyToManyField(
        Celebrity, related_name='tvshows', through='Crew')
    creators = models.ManyToManyField(Celebrity)
    network = models.ForeignKey(Network, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return self.title
class Crew(models.Model):
    """Join table linking a Celebrity to a TvShow.

    NOTE(review): this class looks like the intended through model for
    TvShow.cast_crew — confirm that the field's ``through=`` reference
    matches this class name.
    """
    celebrity = models.ForeignKey(Celebrity, on_delete=models.CASCADE)
    tv_show = models.ForeignKey(TvShow, on_delete=models.CASCADE)
class TvSeason(models.Model):
    # TODO: unimplemented placeholder model.
    pass
class TvEpisode(models.Model):
    # TODO: unimplemented placeholder model.
    pass
| 2.046875 | 2 |
tools/convert_old_preshow_to_new.py | MattDietz/lightshowpi | 3 | 12759549 | <filename>tools/convert_old_preshow_to_new.py
# This tool will allow you to convert your old preshow config to the
# new preshow_configuration.
# usage
# python convert_old_preshow_to_new.py
import collections
import json
import os
import sys
# NOTE(review): Python 2 script (print statements, raw_input); port before
# running under Python 3.
print "This will generate a new config file with the old style preshow removed"
print "It will not change any of your existing files, a new files will be added"
print "to your config folder, and you can decide how to use it."
print
question = raw_input("Would you like to proceed? (Y to continue) :")
if not question in ["y", "Y"]:
    sys.exit(0)
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
    print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, "
          "see readme")
    sys.exit()
CONFIG_DIR = HOME_DIR + '/config/'
# hack to get the configuration_manager to load from a different directory
path = list(sys.path)
# insert script location and configuration_manager location into path
sys.path.insert(0, HOME_DIR + "/py")
# import the configuration_manager so we save ourselves a lot or work
import configuration_manager as cm
# get a copy of the configuration to work with
config = cm.CONFIG
# the old comma-separated "type:duration" preshow entries
old_preshow = list(config.get('lightshow','preshow').split(','))
# an ordered dict so it looks pretty going back in
preshow = collections.OrderedDict()
preshow['transitions'] = []
# the work horse: parse each "on:1.5"-style entry, skipping malformed ones
for transition in old_preshow:
    transition = transition.split(':')
    if len(transition) == 0 or (len(transition) == 1 and len(transition[0]) == 0):
        continue
    if len(transition) != 2:
        continue
    transition_config = dict()
    transition_type = str(transition[0]).lower()
    if not transition_type in ['on', 'off']:
        continue
    transition_config['type'] = transition_type
    transition_config['duration'] = float(transition[1])
    preshow['transitions'].append(transition_config)
# add the audio option, setting as no audio
preshow['audio_file'] = 'null'
# format preshow_configuration and put it in
data = "\n" + str(json.dumps(preshow, indent=4))
config.set('lightshow', 'preshow_configuration', data)
# remove old preshow for the last time
config.remove_option('lightshow', 'preshow')
# write new config file to config folder and name new.cfg
with open(CONFIG_DIR + "new.cfg", "w") as new_config:
    config.write(new_config)
# let the user know
print
print "Your updated config file is located in the config folder and named new.cfg"
print "It contains all the info from your current config files, you can delete"
print "any items you do not want or need to override from the defaults.cfg"
print "but all you really new to do is rename it to override.cfg and your ready"
print "to go, if you also has the old_preshow config in .lights.cfg you should"
print "remove that option from that file"
# restore path
sys.path[:] = path
| 2.859375 | 3 |
adult/mixture_model/load_adult.py | DPBayes/data-sharing-examples | 0 | 12759550 | <gh_stars>0
import numpy as np
import pandas as pd
def obj2int(x):
    """Build forward (value -> index) and reverse (index -> value) maps
    over the sorted unique values of *x*."""
    encode = {}
    decode = {}
    for idx, val in enumerate(np.unique(x)):
        encode[val] = idx
        decode[idx] = val
    return encode, decode
def fetch_data(include_test=False):
    """Load the UCI Adult dataset, drop NA rows and encode every column.

    Numeric columns are min-max scaled into (0, 1), clipped away from the
    exact endpoints; object columns are integer-encoded via obj2int.
    Returns (encoded data, untouched original frame, per-column maps
    suitable for decode_data).
    """
    original_data = pd.read_csv(
        "../data/adult.data",
        names=[
            # NOTE(review): "Martial Status" is a misspelling of
            # "Marital Status" but it is the column name used downstream.
            "Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Martial Status",
            "Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
            "Hours per week", "Country", "Target"],
        sep=r'\s*,\s*',
        engine='python',
        na_values="?")
    data = original_data.dropna()
    ## Privbayes was trained with both train and test data, so try that with DPVI
    test_data = pd.read_csv('../data/adult.test', sep=r'\s*,\s*', comment="|", header = None, names=data.columns,\
            engine="python", na_values="?")
    test_data = test_data.dropna()
    if include_test : data = data.append(test_data)
    maps = {}
    # Encode variables: numeric -> scaled floats, object -> integer codes.
    for col in data.columns:
        if data[col].dtype in ['int', 'float']:
            min_value = np.min(data[col])
            max_value = np.max(data[col])
            maps[col] = [min_value, max_value]
            data[col] = np.clip((data[col]-min_value)/(max_value-min_value), 1e-6, 1-1e-6)
        if data[col].dtype == 'O':
            maps[col] = obj2int(data[col])
            data[col] = data[col].map(maps[col][0])
    return data, original_data, maps
def decode_data(syn_data, maps):
    """Invert the per-column encoding applied in fetch_data.

    Numeric columns (maps[col] is a [min, max] list) are rescaled back to
    their original range; categorical columns (maps[col] is a
    (label->int, int->label) pair) are mapped back to their labels.
    Returns a new DataFrame; *syn_data* is not modified.

    Idiom fix: ``type(x) == list`` replaced with ``isinstance``.
    """
    synthetic_data = pd.DataFrame()
    for col in syn_data.columns:
        decode_map = maps[col]
        if isinstance(decode_map, list):  # numeric column: [min, max]
            min_value, max_value = decode_map
            synthetic_data[col] = syn_data[col] * (max_value - min_value) + min_value
        else:  # categorical column: use the int -> label dict
            synthetic_data[col] = syn_data[col].map(decode_map[1])
    return synthetic_data
| 2.5625 | 3 |