from __future__ import print_function
import json
import random
import logging
print('Loading function')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def respond(err, res=None):
return res
def lambda_handler(event, context):
print("Print Received event: " + json.dumps(event, indent=2))
num_list = event['numbers']
# print("num_str : ", type(num_str), num_str)
# print("num_str[0] : ", type(num_str[0]), num_str[0])
my_sorted = sorted(num_list)
return respond(None, my_sorted)
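# A quick local sanity check for the handler above (the event payload is a
# hypothetical example, not part of the original function):
if __name__ == '__main__':
    print(lambda_handler({'numbers': [5, 3, 1, 4, 2]}, None))  # -> [1, 2, 3, 4, 5]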
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import json
from collections import OrderedDict
from collections import defaultdict
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from ..modeling.keypoint_utils import oks_nms
__all__ = ['KeyPointTopDownCOCOEval']
class KeyPointTopDownCOCOEval(object):
def __init__(self,
anno_file,
num_samples,
num_joints,
output_eval,
iou_type='keypoints',
in_vis_thre=0.2,
oks_thre=0.9):
super(KeyPointTopDownCOCOEval, self).__init__()
self.coco = COCO(anno_file)
self.num_samples = num_samples
self.num_joints = num_joints
self.iou_type = iou_type
self.in_vis_thre = in_vis_thre
self.oks_thre = oks_thre
self.output_eval = output_eval
self.res_file = os.path.join(output_eval, "keypoints_results.json")
self.reset()
def reset(self):
self.results = {
'all_preds': np.zeros(
(self.num_samples, self.num_joints, 3), dtype=np.float32),
'all_boxes': np.zeros((self.num_samples, 6)),
'image_path': []
}
self.eval_results = {}
self.idx = 0
def update(self, inputs, outputs):
kpts, _ = outputs['keypoint'][0]
num_images = inputs['image'].shape[0]
self.results['all_preds'][self.idx:self.idx + num_images, :, 0:
3] = kpts[:, :, 0:3]
self.results['all_boxes'][self.idx:self.idx + num_images, 0:
2] = inputs['center'].numpy()[:, 0:2]
self.results['all_boxes'][self.idx:self.idx + num_images, 2:
4] = inputs['scale'].numpy()[:, 0:2]
self.results['all_boxes'][self.idx:self.idx + num_images, 4] = np.prod(
inputs['scale'].numpy() * 200, 1)
self.results['all_boxes'][self.idx:self.idx + num_images,
5] = np.squeeze(inputs['score'].numpy())
self.results['image_path'].extend(inputs['im_id'].numpy())
self.idx += num_images
def _write_coco_keypoint_results(self, keypoints):
data_pack = [{
'cat_id': 1,
'cls': 'person',
'ann_type': 'keypoints',
'keypoints': keypoints
}]
results = self._coco_keypoint_results_one_category_kernel(data_pack[0])
if not os.path.exists(self.output_eval):
os.makedirs(self.output_eval)
with open(self.res_file, 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
try:
json.load(open(self.res_file))
except Exception:
content = []
with open(self.res_file, 'r') as f:
for line in f:
content.append(line)
content[-1] = ']'
with open(self.res_file, 'w') as f:
for c in content:
f.write(c)
def _coco_keypoint_results_one_category_kernel(self, data_pack):
cat_id = data_pack['cat_id']
keypoints = data_pack['keypoints']
cat_results = []
for img_kpts in keypoints:
if len(img_kpts) == 0:
continue
_key_points = np.array(
[img_kpts[k]['keypoints'] for k in range(len(img_kpts))])
_key_points = _key_points.reshape(_key_points.shape[0], -1)
result = [{
'image_id': img_kpts[k]['image'],
'category_id': cat_id,
'keypoints': _key_points[k].tolist(),
'score': img_kpts[k]['score'],
'center': list(img_kpts[k]['center']),
'scale': list(img_kpts[k]['scale'])
} for k in range(len(img_kpts))]
cat_results.extend(result)
return cat_results
def get_final_results(self, preds, all_boxes, img_path):
_kpts = []
for idx, kpt in enumerate(preds):
_kpts.append({
'keypoints': kpt,
'center': all_boxes[idx][0:2],
'scale': all_boxes[idx][2:4],
'area': all_boxes[idx][4],
'score': all_boxes[idx][5],
'image': int(img_path[idx])
})
# image x person x (keypoints)
kpts = defaultdict(list)
for kpt in _kpts:
kpts[kpt['image']].append(kpt)
# rescoring and oks nms
num_joints = preds.shape[1]
in_vis_thre = self.in_vis_thre
oks_thre = self.oks_thre
oks_nmsed_kpts = []
for img in kpts.keys():
img_kpts = kpts[img]
for n_p in img_kpts:
box_score = n_p['score']
kpt_score = 0
valid_num = 0
for n_jt in range(0, num_joints):
t_s = n_p['keypoints'][n_jt][2]
if t_s > in_vis_thre:
kpt_score = kpt_score + t_s
valid_num = valid_num + 1
if valid_num != 0:
kpt_score = kpt_score / valid_num
# rescoring
n_p['score'] = kpt_score * box_score
keep = oks_nms([img_kpts[i] for i in range(len(img_kpts))],
oks_thre)
if len(keep) == 0:
oks_nmsed_kpts.append(img_kpts)
else:
oks_nmsed_kpts.append([img_kpts[_keep] for _keep in keep])
self._write_coco_keypoint_results(oks_nmsed_kpts)
def accumulate(self):
self.get_final_results(self.results['all_preds'],
self.results['all_boxes'],
self.results['image_path'])
coco_dt = self.coco.loadRes(self.res_file)
coco_eval = COCOeval(self.coco, coco_dt, 'keypoints')
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
keypoint_stats = []
for ind in range(len(coco_eval.stats)):
keypoint_stats.append((coco_eval.stats[ind]))
self.eval_results['keypoint'] = keypoint_stats
def log(self):
stats_names = [
'AP', 'Ap .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',
'AR .75', 'AR (M)', 'AR (L)'
]
num_values = len(stats_names)
print(' '.join(['| {}'.format(name) for name in stats_names]) + ' |')
print('|---' * (num_values + 1) + '|')
print(' '.join([
'| {:.3f}'.format(value) for value in self.eval_results['keypoint']
]) + ' |')
def get_results(self):
return self.eval_results
|
import cv2 as cv
import numpy as np
titleWindow = 'Hit_miss.py'
input_image = np.array((
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 255, 255, 255, 0, 0, 0, 255],
[0, 255, 255, 255, 0, 0, 0, 0],
[0, 255, 255, 255, 0, 255, 0, 0],
[0, 0, 255, 0, 0, 0, 0, 0],
[0, 0, 255, 0, 0, 255, 255, 0],
    [0, 255, 0, 255, 0, 0, 255, 0],
[0, 255, 255, 255, 0, 0, 0, 0]), dtype="uint8")
kernel = np.array((
[0, 1, 0],
[1, -1, 1],
[0, 1, 0]), dtype="int")
output_image = cv.morphologyEx(input_image, cv.MORPH_HITMISS, kernel)
rate = 50
kernel = (kernel + 1) * 127
kernel = np.uint8(kernel)
kernel = cv.resize(kernel, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow(titleWindow + " - kernel", kernel)
cv.moveWindow(titleWindow + " - kernel", 0, 0)
input_image = cv.resize(input_image, None, fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow(titleWindow + " - Original", input_image)
cv.moveWindow(titleWindow + " - Original", 0, 200)
output_image = cv.resize(output_image, None , fx = rate, fy = rate, interpolation = cv.INTER_NEAREST)
cv.imshow(titleWindow + " - Hit or Miss", output_image)
cv.moveWindow(titleWindow + " - Hit or Miss", 500, 200)
cv.waitKey(0)
cv.destroyAllWindows()
|
import smart_imports
smart_imports.all()
settings = dext_app_settings.app_settings('PVP',
BALANCER_SLEEP_TIME=5,
BALANCING_TIMEOUT=5 * 60,
BALANCING_MAX_LEVEL_DELTA=16,
BALANCING_MIN_LEVEL_DELTA=4,
BALANCING_WITHOUT_LEVELS=False # remove level limitation
)
|
from datetime import date
a = int(input('Which year do you want to check? Enter 0 to use the current year: '))
if a == 0:
    a = date.today().year
if a % 4 == 0 and a % 100 != 0 or a % 400 == 0:
    print('The year {} is a leap year'.format(a))
else:
    print('The year {} is NOT a leap year'.format(a))
|
from django.utils.text import slugify
from dcim.models import Site
from extras.scripts import *
class MyScript(Script):
class Meta:
name = "Fix Site Slug"
description = "updates all sites to use the lower case facility code as the site slug"
commit_default = False
def run(self, data, commit):
for site in Site.objects.all():
try:
site.slug = site.facility.lower()
site.save()
            except AttributeError:  # facility may be unset (None)
self.log_warning(f"Could not update site [{site}]")
|
from unityagents import UnityEnvironment
import numpy as np
from dqn_agent import Agent
from collections import deque
import torch
# Get the Unity environment
env = UnityEnvironment(file_name="Banana_Windows_x86_64/Banana.exe")
# Get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name] # Reset the environment
state = env_info.vector_observations[0] # Initialize the state
def run():
global env
score = 0 # Initialize the score
env_info = env.reset(train_mode=False)[brain_name] # Reset the environment
state = env_info.vector_observations[0] # Initialize the state
while True:
action = int(agent.act(state))
env_info = env.step(action)[brain_name] # Send the action to the environment
next_state = env_info.vector_observations[0] # Get the next state
reward = env_info.rewards[0] # Get the reward
done = env_info.local_done[0] # See if episode has finished
score += reward # Update the score
state = next_state # Update the state
if done: # Exit loop if episode finished
break
agent = Agent(state_size=len(state), action_size=brain.vector_action_space_size, seed=1)
run()
agent.qnetwork_local.load_state_dict(torch.load('agent.pth'))
run()
# Close the environment
env.close()
|
"""
Mock plugin file for tests
"""
import numpy as np
from batman.functions import Ishigami, Branin
f_ishigami = Ishigami()
f_branin = Branin()
def f_snapshot(point):
return np.array([42, 87, 74, 74])
|
# coding: utf-8
# Copyright (c) 2018 ubirch GmbH.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ubirch.anchoring import *
from kafka import *
import json  # json is used below; it may not be re-exported by the wildcard imports
args = set_arguments("multichain")
server = args.server
if server == 'SQS':
print("SERVICE USING SQS QUEUE MESSAGING")
print("output queue name : %s" % args.output)
url = args.url
region = args.region
aws_secret_access_key = args.accesskey
aws_access_key_id = args.keyid
output_messages = get_queue(args.output, url, region, aws_secret_access_key, aws_access_key_id)
producer = None
while True:
response = output_messages.receive_messages()
for r in response:
print(r.body)
r.delete()
elif server == 'KAFKA':
print("SERVICE USING APACHE KAFKA FOR MESSAGING")
print("output topic name : %s" % args.output)
bootstrap_server = args.bootstrap_server
output_messages = KafkaConsumer(args.output, bootstrap_servers=bootstrap_server,
value_deserializer=lambda m: json.dumps(m.decode('ascii')))
for message in output_messages:
print(json.loads(message.value))
|
# country code used is alpha 3 country code
currency_countrycode_mapping = {'AED':'ARE',
'AFN':'AFG',
'ALL':'ALB',
'AMD':'ARM',
'ANG':'ANT',
'AOA':'AGO',
'ARS':'ARG',
'AUD':'AUS',
'AWG':'ABW',
'AZN':'AZE',
'BAM':'BIH',
'BBD':'BRB',
'BDT':'BGD',
'BGN':'BGR',
'BHD':'BHR',
'BIF':'BDI',
'BMD':'BMU',
'BND':'BRN',
'BOB':'BOL',
'BRL':'BRA',
'BSD':'BHS',
'BTN':'BTN',
'BWP':'BWA',
'BYR':'BLR',
'BZD':'BLZ',
'CAD':'CAN',
'CDF':'COD',
'XAF':'CAF',
'CHF':'CHE',
'CLP':'CHL',
'CNH':'CHN',
'CNY':'CHN',
'COP':'COL',
'CRC':'CRI',
'CUP':'CUB',
'CVE':'CPV',
'CZK':'CZE',
'DJF':'DJI',
'DKK':'DNK',
'DOP':'DOM',
'DZD':'DZA',
'ECS':'ECU',
'EGP':'EGY',
'ERN':'ERI',
'ETB':'ETH',
'EUR':'EUR',
'FJD':'FJI',
'FKP':'FLK',
'GBP':'GBR',
'GEL':'GEO',
'GHS':'GHA',
'GIP':'GIB',
'GMD':'GMB',
'GNF':'GIN',
'GTQ':'GTM',
'GYD':'GUY',
'HKD':'HKG',
'HNL':'HND',
'HRK':'HRV',
'HTG':'HTI',
'HUF':'HUN',
'IDR':'IDN',
'ILS':'ISR',
'INR':'IND',
'IQD':'IRQ',
'ISK':'ISL',
'JMD':'JAM',
'JOD':'JOR',
'JPY':'JPN',
'KES':'KEN',
'KGS':'KGZ',
'KHR':'KHM',
'KRW':'KOR',
'KWD':'KWT',
'KYD':'CYM',
'KZT':'KAZ',
'LAK':'LAO',
'LBP':'LBN',
'LKR':'LKA',
'LRD':'LBR',
'LSL':'LSO',
'LYD':'LBY',
'MAD':'MAR',
'MDL':'MDA',
'MGA':'MDG',
'MKD':'MKD',
'MMK':'MMR',
'MNT':'MNG',
'MOP':'MAC',
'MRO':'MRT',
'MUR':'MUS',
'MVR':'MDV',
'MWK':'MWI',
'MXN':'MEX',
'MYR':'MYS',
'MZN':'MOZ',
'NAD':'NAM',
'NGN':'NGA',
'NIO':'NIC',
'NOK':'NOR',
'NPR':'NPL',
'NZD':'NZL',
'OMR':'OMN',
'PAB':'PAN',
'PEN':'PER',
'XPF':'PYF',
'PGK':'PNG',
'PHP':'PHL',
'PKR':'PAK',
'PLN':'POL',
'PYG':'PRY',
'QAR':'QAT',
'RON':'ROU',
'RSD':'SRB',
'RUB':'RUS',
'RWF':'RWA',
'SAR':'SAU',
'SBD':'SLB',
'SCR':'SYC',
'SEK':'SWE',
'SGD':'SGP',
'SHP':'SHN',
'SLL':'SLE',
'SOS':'SOM',
'SRD':'SUR',
'STD':'STP',
'SVC':'SLV',
'SZL':'SWZ',
'THB':'THA',
'TJS':'TJK',
'TND':'TUN',
'TOP':'TON',
'TRY':'TUR',
'TTD':'TTO',
'TWD':'TWN',
'TZS':'TZA',
'UAH':'UKR',
'UGX':'UGA',
'USD':'USA',
'UYU':'URY',
'UZS':'UZB',
'VEF':'VEN',
'VND':'VNM',
'VUV':'VUT',
'WST':'WSM',
'YER':'YEM',
'ZAR':'ZAF',
'ZMW':'ZMB'
}
def currency_to_countrycode(currency):
return currency_countrycode_mapping.get(currency,None)
def countrycode_to_currency(country):
for currency, countrycode in currency_countrycode_mapping.items():
if countrycode == country:
return currency
return None
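# Example usage of the two lookup helpers (values grounded in the mapping above):
if __name__ == '__main__':
    assert currency_to_countrycode('USD') == 'USA'
    assert currency_to_countrycode('XYZ') is None   # unknown currency code
    assert countrycode_to_currency('JPN') == 'JPY'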
|
from HiParTIPy.PtiCffi import pti,PTI
import os
class VecBuff:
def __init__(self,size):
self.nthreads = (int)(os.popen('grep -c cores /proc/cpuinfo').read())
print(self.nthreads)
self.address = pti.cast("ptiValueVector *", PTI.malloc(self.nthreads * pti.sizeof(pti.new("ptiValueVector *"))))
PTI.ptiMakeVectorBuff(self.address, size)
def free(self):
PTI.ptiFreeVecBuff(self.address)
class MatBuff:
def __init__(self,ncols,nrows):
self.nthreads = (int)(os.popen('grep -c cores /proc/cpuinfo').read())
print(self.nthreads)
self.address = pti.cast("ptiMatrix *", PTI.malloc(self.nthreads * pti.sizeof(pti.new("ptiMatrix *"))))
PTI.ptiMakeMatrixBuff(self.address, ncols,nrows)
|
from Bio import SeqIO
import sys
# take the output of gffread and make a file with the longest isoforms
genes = SeqIO.to_dict(SeqIO.parse(sys.argv[1],"fasta"))
longest = {}
for rec in genes:
gene = genes[rec].description.split("=")[-1]
    if (gene not in longest) or (longest[gene][0] < len(genes[rec])):
longest[gene] = (len(genes[rec]),genes[rec].id)
for gene in longest:
description = genes[longest[gene][1]].id
genes[longest[gene][1]].id = gene
genes[longest[gene][1]].description = description
print(genes[longest[gene][1]].format("fasta"), end="")
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, trava and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class LabelPrinting(Document):
def validate(self):
self.validate_item()
def validate_item(self):
for item in self.items:
data_item = frappe.db.get_value("Item",
filters={"item_code":item.item_code}, fieldname=["item_name", "article"])
item.item_name = data_item[0]
item.article = data_item[1]
|
from typing import Any, Optional
import attr
@attr.s(eq=False, frozen=True, slots=True)
class Vertex:
"""Represents a vertex in a Knowledge Graph."""
name = attr.ib(type=str, validator=attr.validators.instance_of(str))
predicate = attr.ib(
default=False,
type=bool,
validator=attr.validators.instance_of(bool),
repr=False,
)
vprev = attr.ib(default=None, type=Optional[Any], repr=False)
vnext = attr.ib(default=None, type=Optional[Any], repr=False)
def __eq__(self, other: Any) -> bool:
"""Defines behavior for the equality operator, ==.
Args:
other: The other vertex to test the equality.
Returns:
            True if the two vertices are equal, False otherwise.
"""
if not isinstance(other, Vertex):
return False
elif self.predicate:
return (self.vprev, self.vnext, self.name) == (
other.vprev,
other.vnext,
other.name,
)
return self.name == other.name
def __hash__(self) -> int:
"""Defines behavior for when hash() is called on a vertex.
Returns:
The identifier and name of the vertex, as well as its previous
and next neighbor if the vertex has a predicate. The hash of
the name of the vertex otherwise.
"""
if self.predicate:
return hash((self.vprev, self.vnext, self.name))
return hash(self.name)
def __lt__(self, other: "Vertex") -> bool:
"""Defines behavior for the small than operator, <.
Args:
other: The other vertex.
Returns:
True if the first vertex is smaller than the second, False
otherwise.
"""
return self.name < other.name
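# A minimal sketch of how Vertex instances behave (the URIs are illustrative,
# not from the original module):
if __name__ == "__main__":
    alice = Vertex("http://example.org/Alice")
    bob = Vertex("http://example.org/Bob")
    knows = Vertex("http://example.org/knows", predicate=True, vprev=alice, vnext=bob)
    assert alice == Vertex("http://example.org/Alice")  # plain vertices compare by name
    assert alice != knows  # predicates compare by (vprev, vnext, name)
    assert sorted([bob, alice]) == [alice, bob]  # __lt__ orders by name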
|
# wsdl.py - WSDLParser class, part of osa.
# Copyright 2013 Sergey Bozhenkov, boz at ipp.mpg.de
# Licensed under LGPLv3 or later, see the COPYING file.
"""
Conversion of WSDL documents into Python.
"""
from . import xmlnamespace
from . import xmlparser
from . import xmlschema
from . import xmltypes
from . import message
from . import method
import xml.etree.cElementTree as etree
class WSDLParser(object):
"""
Parser to get types and methods defined in the document.
"""
def __init__(self, wsdl_url):
"""
Initialize parser.
The WSDL document is loaded and is converted into xml.
Initialized members:
self.wsdl_url - url of wsdl document, or file
self.wsdl - xml document read from wsdl_url (etree.Element)
self.tns - target namespace
Parameters
----------
wsdl_url : str
Address of the WSDL document.
"""
self.wsdl_url = wsdl_url
self.wsdl = xmlparser.parse_qualified_from_url(wsdl_url)
if self.wsdl.tag != "{%s}definitions" % xmlnamespace.NS_WSDL:
raise ValueError("Not a WSDL xml, the top level element: %s" %
self.wsdl.tag)
# get target namespace
self.tns = self.wsdl.get('targetNamespace', "")
    def get_types(self):
"""
Constructs a map of all types defined in the document.
Returns
-------
out : dict
A map of found types {type_name : complex class}
"""
types_section = self.wsdl.findall('.//{%s}types' %
xmlnamespace.NS_WSDL)[0]
schemas = types_section.findall('./{%s}schema' % xmlnamespace.NS_XSD)
xtypes = {}
for schema in schemas:
parser = xmlschema.XMLSchemaParser(schema, wsdl_url=self.wsdl_url)
xtypes.update(parser.get_list_of_defined_types())
types = xmlschema.XMLSchemaParser.convert_xmltypes_to_python(xtypes)
xmltypes.XMLAny._types.update(types)
return types
def get_messages(self, types):
"""
Construct messages from message section.
Parameters
----------
types : dictionary of types
Types as returned by get_types().
Returns
-------
out : dict
Map message name -> Message instance
"""
xmessages = self.wsdl.findall('./{%s}message' % xmlnamespace.NS_WSDL)
messages = {}
for x in xmessages:
message_name = "{%s}%s" % (self.tns, x.get("name", ""))
parts = []
xparts = x.findall('./{%s}part' % xmlnamespace.NS_WSDL)
for y in xparts:
part_name = y.get("name", "")
part_type = y.get("element", None)
if part_type is None:
part_type = y.get("type", None)
if part_type is None:
raise ValueError("Could not find part type in:\n %s"
% (etree.tostring(x).decode()))
cls = None
if part_type in types:
cls = types[part_type]
elif part_type in xmltypes.primmap:
cls = xmltypes.primmap[part_type]
else:
raise ValueError("Type %s not found for message:\n%s" %
(part_type, etree.tostring(x).decode()))
parts.append([part_name, cls])
messages[message_name] = message.Message(message_name,
parts)
return messages
def get_operations(self, messages):
"""
Get list of operations with messages
from portType section.
Parameters
----------
messages : dict
Dictionary of message from `get_messages`.
Returns
-------
out : dict
{portType -> {operation name -> Method instance}}
The method here does not have location.
"""
xports = self.wsdl.findall('./{%s}portType' % xmlnamespace.NS_WSDL)
ports = {}
for xport in xports:
port_name = "{%s}%s" % (self.tns, xport.get("name", ""))
ports[port_name] = {}
xops = xport.findall('./{%s}operation' % xmlnamespace.NS_WSDL)
for xop in xops:
op_name = xop.get("name", "")
ports[port_name][op_name] = {}
xin = xop.findall('./{%s}input' % xmlnamespace.NS_WSDL)
if not(xin):
raise ValueError("No input message in operation: \n%s" %
(etree.tostring(xop).decode()))
in_name = xin[0].get("message", "")
                if in_name not in messages:
                    raise ValueError("Message %s not found." % in_name)
in_cl = messages[in_name]
out_cl = None
xout = xop.findall('./{%s}output' % xmlnamespace.NS_WSDL)
if xout:
out_name = xout[0].get("message", "")
                    if out_name not in messages:
                        raise ValueError("Message %s not found." % out_name)
out_cl = messages[out_name]
# documentation
doc = xop.find('{%s}documentation' % xmlnamespace.NS_WSDL)
if doc is not None:
doc = doc.text
op = method.Method(op_name, in_cl, out_cl, doc=doc)
ports[port_name][op_name] = op
return ports
def get_bindings(self, operations):
"""
Check binding document/literal and http transport.
If any of the conditions is not satisfied
the binding is dropped, i.e. not present in
the return value. This also sets soapAction
and use_parts of the messages.
Parameters
----------
operations : dict as returned by get_operations
Returns
-------
out : dict
Map similar to that from get_operations but
with binding names instead of portType names.
"""
xbindings = self.wsdl.findall("./{%s}binding" % xmlnamespace.NS_WSDL)
bindings = {}
for xb in xbindings:
b_name = "{%s}%s" % (self.tns, xb.get("name", ""))
b_type = xb.get("type", None)
if b_type is None:
raise ValueError("No type in binding %s" %
(etree.tostring(xb).decode()))
            if b_type not in operations:
                raise ValueError("Binding type %s not in operations" % b_type)
xb_soap = xb.findall("./{%s}binding" % xmlnamespace.NS_SOAP)
if not(xb_soap):
continue # not a soap binding in wsdl
if xb_soap[0].get("style", "") == "rpc":
continue
if xb_soap[0].get("transport", "") !=\
"http://schemas.xmlsoap.org/soap/http":
continue
ops = operations[b_type]
bindings[b_name] = {}
xops = xb.findall("./{%s}operation" % xmlnamespace.NS_WSDL)
for xop in xops:
op_name = xop.get("name", "")
                if op_name not in ops:
                    raise ValueError("operation %s not in operations" %
                                     op_name)
soap_op = xop.find("./{%s}operation" % xmlnamespace.NS_SOAP)
s_action = None
if soap_op is not None:
s_action = soap_op.get("soapAction", "")
all_literal = True
xop_in = xop.find("./{%s}input" % xmlnamespace.NS_WSDL)
if xop_in is not None:
xop_in_body = xop_in.find("./{%s}body" % xmlnamespace.NS_SOAP)
if xop_in_body is None:
raise ValueError("No body found for %s" %
(etree.tostring(xop).decode()))
if xop_in_body.get("use") != "literal":
all_literal = False
parts = xop_in_body.get("parts")
if parts is None:
ops[op_name].input.use_parts = ops[op_name].input.parts
else:
parts = parts.split(" ")
ops[op_name].input.use_parts = []
for p in parts:
for pp in ops[op_name].input.parts:
if pp[0] == p:
ops[op_name].input.use_parts.append(pp)
break
xop_out = xop.find("./{%s}output" % xmlnamespace.NS_WSDL)
if xop_out is not None:
xop_out_body = xop_out.find("./{%s}body" % xmlnamespace.NS_SOAP)
if xop_out_body is None:
raise ValueError("No body found for %s" %
(etree.tostring(xop).decode()))
if xop_out_body.get("use") != "literal":
all_literal = False
parts = xop_out_body.get("parts")
if parts is None:
ops[op_name].output.use_parts = ops[op_name].output.parts
else:
parts = parts.split(" ")
ops[op_name].output.use_parts = []
for p in parts:
for pp in ops[op_name].output.parts:
if pp[0] == p:
ops[op_name].output.use_parts.append(pp)
break
# rebuild __doc__ after messing with messages
ops[op_name]._redoc()
if all_literal:
ops[op_name].action = s_action
bindings[b_name][op_name] = ops[op_name]
return bindings
def get_services(self, bindings):
"""
        Find all services and make the final list of
        operations.
This also sets location to all operations.
Parameters
----------
        bindings : dict from get_bindings.
Returns
-------
out : dict
            Dictionary {service -> {operation name -> method}}.
"""
xservices = self.wsdl.findall('./{%s}service' % xmlnamespace.NS_WSDL)
services = {}
for xs in xservices:
s_name = xs.get("name", "")
xports = xs.findall('./{%s}port' % xmlnamespace.NS_WSDL)
for xp in xports:
b = xp.get("binding", "")
xaddr = xp.findall('./{%s}address' % xmlnamespace.NS_SOAP)
if not(xaddr):
continue # no soap 11
loc = xaddr[0].get("location", "")
if b in bindings:
for k, v in bindings[b].items():
v.location = loc
services[s_name] = bindings[b]
return services
def parse(self):
"""
Do parsing, return types, services.
Returns
-------
out : (types, services)
"""
t = self.get_types()
m = self.get_messages(t)
op = self.get_operations(m)
b = self.get_bindings(op)
s = self.get_services(b)
return t, s
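# A minimal usage sketch (the WSDL URL is a placeholder, not from the original
# module):
#
#     parser = WSDLParser("http://example.com/service?wsdl")
#     types, services = parser.parse()
#     for service_name, operations in services.items():
#         print(service_name, sorted(operations))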
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .multihead_attention import MultiheadAttention
from .graphormer_layers import GraphNodeFeature, GraphAttnBias
from .graphormer_graph_encoder_layer import GraphormerGraphEncoderLayer
from .graphormer_graph_encoder import GraphormerGraphEncoder, init_graphormer_params
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 CPV.BY
# LICENSE: Commercial
# class for working with the local SQL database
# tables - GRUPPA, TOVAR, MOD, CASHIER, CLIENT
import os
from libs.base.lbsql import qsql
import json
import time
from datetime import datetime
from libs.applibs.programsettings import oConfig
from kivy.config import ConfigParser
class Oqsql(qsql):
dbname = 'localdb'
ctables = 'gruppa','tovar','cashier'
    lopen = False
    dictstru = []  # table structure
    config = None
    dictstrusave = []  # table structure to be saved
def __init__(self, **kwargs):
super(Oqsql, self).__init__(**kwargs)
self.sqlstrconnect = self.dbname + '.db'
self.config = oConfig(oApp = self)
        # check that the DB exists; if missing, create the DB and the ctables tables
self.dictstru = []
self.dictstrusave = []
#if not self.dbcheck_lite():return True
    # check that the DB exists
    # create the DB if it is missing
    # verify the DB structure
def dbcheck_lite(self):
if not self.open():return False
ctxt = "SELECT " + "name FROM sqlite_master WHERE type='table' AND name='tovar';"
if not self.execute(ctxt): return False
allrec = self.getresult()
if len(allrec) > 0: return True
        # Table templates are taken from the structure file.
if not self.getstru():return False
        # check that the tables exist in the DB; create them if they do not
for item in self.dictstru:
for ctable in item:
                # skip service tables
                if ctable in ['index', 'proc', 'work_id']: continue
self.dictstrusave = []
                # build the table structure
script = ''
for csr in item[ctable]:
if len(script) > 0: script +=', '
script += csr['column'] + ' ' + self.gettype(csr['field'], csr['description'],csr['column'])
script = "CREATE TABLE IF NOT EXISTS " + ctable + "(" + script + ")"
try:
self.execute(script)
self.commit()
except:
self.errmsg = 'Error create sqlite table'
return False
                # save the fields of the created table in the settings
if not self.savetoini('structure',ctable, self.dictstrusave): return False
return True
    # import of the table structure
def getstru(self):
cfile = "data/update/structure"
        # check that the structure file exists, read it, and import the structure
if not os.path.isfile(cfile):
self.errmsg = 'Not found structure file'
return False
try:
f = open(cfile, encoding='utf-8')
except:
self.errmsg = 'Error reading structure file'
return False
alljson = json.loads(f.read())
if len(alljson) == 0:
self.errmsg ='Empty structure file'
return False
for cstr in alljson:
self.dictstru.append({cstr:alljson[cstr]})
f.close()
return True
    # returns the SQL field type matching the structure, and the default value
def gettype(self, field, description, column):
if field == 'serial':
#self.dictstrusave.append({column:'integer'})
return "INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL"
elif field == 'integer':
self.dictstrusave.append({column:'integer'})
return "INTEGER NOT NULL DEFAULT 0"
elif field == 'text':
self.dictstrusave.append({column:'text'})
return "TEXT NOT NULL DEFAULT ''"
elif field == 'boolean':
self.dictstrusave.append({column:'integer'})
return "INTEGER NOT NULL DEFAULT 0"
elif field == 'character':
self.dictstrusave.append({column:'TEXT'})
return "TEXT NOT NULL DEFAULT ''"
elif field == 'decimal':
self.dictstrusave.append({column:'real'})
return "REAL NOT NULL DEFAULT 0"
elif field == 'timestamp':
self.dictstrusave.append({column:'integer'})
default = datetime.today()
unixtime = time.mktime(default.timetuple())
return "INTEGER NOT NULL DEFAULT " + str(unixtime)
else:
self.dictstrusave.append({column:'text'})
return 'TEXT NOT NULL DEFAULT "" '
    # save the structure
def savetoini(self, structure,option, value):
config = ConfigParser()
config.read('set/worksetting.ini')
try:
config.setdefaults('tblstru',{option: value })
config.write()
except:
self.errmsg = 'Error save structure table'
return False
return True
def select(self,ctable, cwhere):
if len(cwhere) > 0:
if not self.execute('SELECT ' + '* FROM ' + ctable +' WHERE ' + cwhere+ ';'):
return False
else:
if not self.execute('SELECT ' + '* FROM ' + ctable + ';'):
return False
return True
    # insert data into a table
def insert(self, ctable, dictdata):
column = ''; value =''
for item in dictdata:
if len(column) != 0: column += ', '
if len(value) != 0: value += ', '
column += item
if type(dictdata[item]) != str: value += str(dictdata[item])
else: value += '"'+dictdata[item]+'"'
if not self.execute('INSERT INTO ' + ctable + '(' + column + ') VALUES('+ value +');' ): return False
return True
    # delete rows
    def delete(self, ctable, cwhere):
        cscript = 'DELETE FROM ' + ctable + ' WHERE ' + cwhere + ';'
        if not self.execute(cscript): return False
return True
    # update data
def update(self,ctable, dictdata, cwhere):
cstr =''
for item in dictdata:
if len(cstr) != 0: cstr += ', '
cstr += item + '='
if type(dictdata[item]) != str: cstr += str(dictdata[item])
else: cstr += '"'+dictdata[item]+'"'
        cscript = 'UPDATE ' + ctable + ' SET ' + cstr + ' WHERE ' + cwhere + ';'
        if not self.execute(cscript): return False
return True
|
import logging
from kazoo.client import KazooClient, KazooState
from constants import zk_sequencer_root
from strategy import SequenceStrategy
class DistributedSequenceCoordinator(object):
def __init__(self, zookeeper_connect, autoscaling_grp_name, strategy_name, instance_id, max_sequence_id,
asg_instances_ids):
self.zk = KazooClient(hosts=zookeeper_connect)
self.running = False
self.interrupted = False
self.autoscaling_grp_name = autoscaling_grp_name
self.strategy_name = strategy_name
self.instance_id = instance_id
self.max_sequence_id = max_sequence_id
self.asg_instances_ids = asg_instances_ids
def state_change_listener(self, state):
logging.debug('zookeeper state changed to {0}'.format(state))
if state == KazooState.LOST or state == KazooState.SUSPENDED:
if self.running:
self.interrupted = True
self.log_msg('distributed coordination interrupted')
raise Exception('zookeeper session interrupted')
"""
Responsible for executing operation in isolation even-in cases of failures, connection-resets etc. Uses optimistic
concurrency control by assuming that operation would be executed without any interruption, and if any interruption
occurs, then acquires a new lock and re-execute the idempotent operation to guarantee isolation.
"""
def execute(self):
result = None
# exception-handling for cases where unable to establish connection to zookeeper
try:
# TODO: use python retrying lib to control with timeouts, max & exponential back-off wait time b/w retries
while result is None or self.interrupted:
self.running = True
self.interrupted = False
self.log_msg('distributed operation starting')
self.zk.start()
self.zk.add_listener(self.state_change_listener)
try:
lock = self.zk.Lock(zk_sequencer_root, self.autoscaling_grp_name)
logging.debug('zookeeper lock created {}'.format(lock.data))
self.log_msg('entering zookeeper lock')
with lock:
result = self.operation()
except Exception as e:
logging.exception(e)
self.log_msg('encountered zk exception')
finally:
self.log_msg('stopping zk')
self.zk.stop()
except Exception as e:
raise e
if result is None:
raise Exception('Unable to generate sequence id')
return result
def operation(self):
instances_root_path = "/".join([zk_sequencer_root, self.autoscaling_grp_name])
self.zk.ensure_path(instances_root_path)
instance_nodes = self.zk.get_children(instances_root_path)
zk_instance_sequencers = {}
for instance_node in instance_nodes:
instance_node_path = "/".join([instances_root_path, instance_node])
instance_id = self.zk.get(instance_node_path)[0]
zk_instance_sequencers[str(instance_id)] = int(instance_node)
logging.debug('zk instances: {0}'.format(zk_instance_sequencers))
instance_sequencers = {k: v for k, v in zk_instance_sequencers.items() if k in self.asg_instances_ids}
logging.debug('active instances with assigned sequences: {0}'.format(instance_sequencers))
generator = SequenceStrategy(self.strategy_name,
self.instance_id,
instance_sequencers,
self.max_sequence_id)
sequence_id = generator.get_sequence_id()
current_instance_node_path = "/".join([instances_root_path, str(sequence_id)])
self.zk.ensure_path(current_instance_node_path)
self.zk.set(current_instance_node_path, str.encode(str(self.instance_id)))
self.running = False
return sequence_id
def log_msg(self, msg):
logging.debug('{0}, running = {1}, interrupted = {2}'.format(msg, self.running, self.interrupted))
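# A minimal usage sketch (the connection string, ASG name, strategy name and
# instance ids are placeholders, not values from the original module):
#
#     coordinator = DistributedSequenceCoordinator(
#         zookeeper_connect='zk1:2181,zk2:2181',
#         autoscaling_grp_name='my-asg',
#         strategy_name='lowest-available',
#         instance_id='i-0123456789abcdef0',
#         max_sequence_id=1023,
#         asg_instances_ids=['i-0123456789abcdef0'],
#     )
#     sequence_id = coordinator.execute()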
|
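# Note: the snippet below is written in Python syntax but appears to target a
# JavaScript runtime (e.g. a Cloudflare Worker) through a Python-to-JS
# transpiler such as Transcrypt; names like js_get, __new__, console, Date and
# JSON come from that environment rather than from the Python standard library.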
async def handleRequest(request):
visitor_ip = request.headers.js_get('CF-Connecting-IP')
console.log(visitor_ip)
r = await fetch("https://api.ip.sb/geoip/" + visitor_ip)
d = await r.json()
d['time'] = Date.now()
return __new__(Response(JSON.stringify(d), {
'headers' : { 'content-type' : 'text/plain' }
}))
addEventListener('fetch', (lambda event: event.respondWith(handleRequest(event.request))))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`test_standard`
====================
.. module:: test_standard
:platform: Unix, Windows
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@nedomkull.com>
Created on 2015-07-04, 12:00
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import re
import pytest
import numpy as np
from calibraxis import Calibraxis
@pytest.fixture(scope='module')
def points_1():
# Measuring range +/-8
return np.array([[-4772.38754098, 154.04459016, -204.39081967],
[3525.0346179, -68.64924886, -34.54604833],
[-658.17681729, -4137.60248854, -140.49377865],
[-564.18562092, 4200.29150327, -130.51895425],
[-543.18289474, 18.14736842, -4184.43026316],
[-696.62532808, 15.70209974, 3910.20734908],
[406.65271419, 18.46827992, -4064.61085677],
[559.45926413, -3989.69513798, -174.71879106],
[597.22629169, -3655.54153041, -1662.83257031],
[1519.02616089, -603.82472204, 3290.58469588]])
@pytest.fixture(scope='module')
def points_2():
return np.array([[-1575.43324607, 58.07787958, -72.69371728],
[1189.53102547, -11.92749837, -23.37687786],
[-212.62989556, -1369.82898172, -48.73498695],
[-183.42717178, 1408.61463096, -33.89745265],
[-162.57253886, 23.43005181, -1394.36722798],
[-216.76963011, 19.37118754, 1300.13822193],
[-809.20208605, 69.1029987, -1251.60104302],
[-1244.03955901, -866.0843061, -67.02594034],
[-1032.3692107, 811.19178082, 699.69602087],
[-538.82617188, -161.6171875, -1337.34895833]])
def test_calibration_points_1(points_1):
c = Calibraxis(verbose=False)
c.add_points(points_1)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_calibration_points_1_scaled(points_1):
c = Calibraxis(verbose=False)
c.add_points(points_1 / ((2 ** 15) / 8.))
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_calibration_points_2(points_2):
c = Calibraxis(verbose=False)
c.add_points(points_2)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_calibration_points_2_scaled(points_2):
c = Calibraxis(verbose=False)
c.add_points(points_2 / ((2 ** 15) / 16.))
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_recalibration_points_2(points_2):
c = Calibraxis(verbose=False)
points = points_2 / ((2 ** 15) / 16.)
for p in points[:-1, :]:
c.add_points(p)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
c.add_points(points[-1, :])
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_add_points_1(points_1):
c = Calibraxis(verbose=False)
points = points_1 / ((2 ** 15) / 8.)
for p in points:
c.add_points(p)
np.testing.assert_almost_equal(np.linalg.norm(np.array(c._calibration_points) - points), 0.0, 6)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_add_points_2(points_1):
c = Calibraxis(verbose=False)
points = points_1 / ((2 ** 15) / 8.)
for p in points:
c.add_points(list(p))
np.testing.assert_almost_equal(np.linalg.norm(np.array(c._calibration_points) - points), 0.0, 6)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_add_points_3(points_1):
c = Calibraxis(verbose=False)
points = points_1 / ((2 ** 15) / 8.)
for p in points:
c.add_points(tuple(p))
np.testing.assert_almost_equal(np.linalg.norm(np.array(c._calibration_points) - points), 0.0, 6)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_add_points_4(points_2):
c = Calibraxis(verbose=False)
points = points_2 / ((2 ** 15) / 8.)
c.add_points(points.tolist())
np.testing.assert_almost_equal(np.linalg.norm(np.array(c._calibration_points) - points), 0.0, 6)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_add_points_5(points_2):
c = Calibraxis(verbose=False)
points = points_2 / ((2 ** 15) / 8.)
c.add_points(points)
c.add_points([])
np.testing.assert_almost_equal(np.linalg.norm(np.array(c._calibration_points) - points), 0.0, 6)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(c._calibration_errors[-1], 0.0, 2)
def test_apply(points_1):
c = Calibraxis(verbose=False)
c.add_points(points_1)
c.calibrate_accelerometer()
np.testing.assert_almost_equal(np.linalg.norm(c.apply(points_1[0, :])), 1.0, 2)
def test_batch_apply(points_1):
c = Calibraxis(verbose=False)
c.add_points(points_1)
c.calibrate_accelerometer()
out = c.batch_apply(points_1)
normed = np.sqrt((np.array(out) ** 2).sum(axis=1))
np.testing.assert_array_almost_equal(normed, 1.0, 2)
def test_error_too_few_points(points_2):
c = Calibraxis(verbose=False)
for p in points_2[:5, :]:
c.add_points(p)
with pytest.raises(ValueError):
c.calibrate_accelerometer()
def test_verbose_prints_progress(points_2, capsys):
c = Calibraxis(verbose=True)
c.add_points(points_2)
c.calibrate_accelerometer()
out, err = capsys.readouterr()
for row in filter(None, out.split('\n')):
        assert re.match(r'^([0-9]+):\s([0-9\-\.e]+)\s*(\([0-9\s\-\.e,]+\))$', row)
|
'''
Convert CSV file to a basic ruleset package
'''
import os
import pathlib
import re
import argparse
from numpy.core.numeric import full
import pandas as pd
from tqdm import tqdm
TPL_RULENAME_CM = 'cm_%s'
TPL_REGEXP_FILENAME = "resources_regexp_re%s.txt"
TPL_USED_RES_FILENAME = "used_resources.txt"
TPL_MATCHRULES_FILENAME = "resources_rules_matchrules.txt"
TPL_CONTEXTRULE_FILENAME = "contextRule_%s.txt"
SKIP_TERMS = set(['-', '_', ''])
# current path
FULLPATH = pathlib.Path(__file__).parent.absolute()
def __norm(s):
'''
Normalize the norm
'''
    # replace separators (slashes, backslashes, hyphens, whitespace) with underscores
    s2 = re.sub(r'[\\\/\-\s]', '_', s)
    # collapse runs of underscores into one
    s3 = re.sub(r'[_]{2,}', '_', s2)
    # convert to upper case
    s4 = s3.upper()
return s4
def __norm_dense(s):
'''
Make a dense norm
'''
return re.sub(r'[_]', '', s)
def __term(s):
'''
Normalize the term
'''
s = str(s).strip().lower()
return s
def mk_matchrule(norm):
'''
Make a matchrule line
'''
return """RULENAME="cm_{norm_dense}",REGEXP="\\b(?i)(?:%re{norm_dense})\\b",LOCATION="NA",NORM="{norm}"
""".format(norm=norm, norm_dense=__norm_dense(norm))
def create_file_used_resources(ruleset_name, text, output=FULLPATH):
'''
Create the file for all used files
'''
fn = TPL_USED_RES_FILENAME
full_fn = os.path.join(
output, ruleset_name, fn
)
with open(full_fn, 'w') as f:
f.write(text)
return os.path.join(
'.', fn
)
def create_file_matchrules(ruleset_name, text, output=FULLPATH):
'''
Create the file for the matchrules
'''
fn = TPL_MATCHRULES_FILENAME
full_fn = os.path.join(
output, ruleset_name, 'rules', fn
)
with open(full_fn, 'w') as f:
f.write(text)
return os.path.join(
'.', 'rules', fn
)
def create_file_regexp(ruleset_name, norm, text, output=FULLPATH):
'''
    Create the file for the resource regexp
'''
norm_dense = __norm_dense(norm)
fn = TPL_REGEXP_FILENAME % norm_dense
full_fn = os.path.join( output, ruleset_name, 'regexp', fn )
with open(full_fn, 'w') as f:
f.write(text)
return os.path.join(
'.', 'regexp', fn
)
def convert_to_ruleset(full_fn, output=FULLPATH, tmode='one', add_norm='yes'):
'''
Convert the file to ruleset format
'''
if full_fn.endswith('.csv'):
df = pd.read_csv(full_fn)
elif full_fn.endswith('.tsv'):
df = pd.read_csv(full_fn, sep="\t")
elif full_fn.endswith('.xlsx') or full_fn.endswith('.xls'):
df = pd.read_excel(full_fn)
else:
        raise Exception('Unsupported file format')
# get the file name as the ruleset name for creating folder
ruleset_name = pathlib.Path(full_fn).stem
# the first column is the norm
col_norm = df.columns[0]
# the second column is the text or term
col_term = df.columns[1]
# create a dictionary for the output
r_dict = {}
# loop on each record
for idx, row in tqdm(df.iterrows()):
norm = row[col_norm]
term = row[col_term]
# make sure the term is a text
term = str(term)
# get the normalized norm and text
norm_normed = __norm(norm)
# use the normalized norm to make sure no duplicated
if norm_normed not in r_dict:
if add_norm == 'yes':
r_dict[norm_normed] = {
"dict": set([norm_normed]),
"items": [norm_normed]
}
else:
r_dict[norm_normed] = {
"dict": set([]),
"items": []
}
if tmode == 'more':
term_list = term.split(';')
for _term in term_list:
                term_normed = __term(_term)
# skip those useless terms
if term_normed in SKIP_TERMS: continue
# add the normed terms
if term_normed not in r_dict[norm_normed]["dict"]:
# ok, a new term
r_dict[norm_normed]["dict"].add(term_normed)
# add the original term to list
r_dict[norm_normed]["items"].append(_term)
else:
# usually just one term
term_normed = __term(term)
# skip those useless terms
if term_normed in SKIP_TERMS: continue
# add the normed terms
if term_normed not in r_dict[norm_normed]["dict"]:
# ok, a new term
r_dict[norm_normed]["dict"].add(term_normed)
# add the original term to list
r_dict[norm_normed]["items"].append(term)
print('* found %s norms' % (
len(r_dict)
))
# now begin to save the files
# first, mkdir
    # before saving, make sure the folder exists
folders = [
os.path.join( output, ruleset_name ),
os.path.join( output, ruleset_name, 'rules' ),
os.path.join( output, ruleset_name, 'regexp' ),
os.path.join( output, ruleset_name, 'context' ),
]
for folder in folders:
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
print('* created %s' % folder)
else:
print('* found %s' % folder)
# then create the files according to norm and text
matchrules = []
used_resources = []
for norm in tqdm(r_dict):
items = r_dict[norm]["items"]
text = '\n'.join(items)
# save the rules
matchrule = mk_matchrule(norm)
matchrules.append(matchrule)
# save the regexp file
fn = create_file_regexp(ruleset_name, norm, text, output)
# add this file to used resources
used_resources.append(fn)
# save the matchrules
fn = create_file_matchrules(
ruleset_name, '\n'.join(matchrules), output
)
used_resources.append(fn)
# save the used_resources
used_resources.append(os.path.join(
'.', TPL_USED_RES_FILENAME
))
create_file_used_resources(
ruleset_name, '\n'.join(used_resources), output
)
print('* done')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Converter for Building MedTagger Ruleset')
# add paramters
parser.add_argument("fn", type=str,
help="The path to the data file (csv, tsv, xls, or xlsx)")
parser.add_argument("--tmode", type=str, default='one',
choices=['one', 'more'],
help="The term mode in the second column, one term or more?")
parser.add_argument("--add_norm", type=str, default='yes',
choices=['yes', 'no'], help="add norm itself to list?")
parser.add_argument("--out", type=str,
help="The output folder")
# parse
args = parser.parse_args()
convert_to_ruleset(args.fn, args.out, args.tmode, args.add_norm)
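# Hypothetical invocation (script, file and folder names are placeholders):
#   python csv2ruleset.py terms.csv --tmode more --add_norm yes --out ./rulesets
# The first column of the input file is treated as the norm and the second
# column as the term(s), as convert_to_ruleset above describes.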
|
#!/usr/bin/env python3
import json
import os
print('Content-Type: text/html')
print()
payload = {
# Question 1
'environment_variables': dict(os.environ),
# Question 2
    'query_parameter': os.environ.get('QUERY_STRING', ''),
# Question 3
'browser_info': os.environ['HTTP_USER_AGENT']
}
for section_title, contents in payload.items():
print(f'''
<h1>{section_title}</h1>
<pre>{contents}</pre>
''')
|
import femagtools
def create_fsl():
machine = dict(
name="PM 130 L4",
lfe=0.1,
poles=4,
outer_diam=0.04,
bore_diam=0.022,
inner_diam=0.005,
airgap=0.001,
stator=dict(
num_slots=12,
statorBG=dict(
yoke_diam_ins=0.0344,
slot_h1=0.0005,
slot_h3=0.004,
slot_width=0.00395,
slot_r1=0.0008,
slot_r2=0.0007,
tooth_width=0.0032,
middle_line=1,
tip_rad=-0.03,
slottooth=0)
),
magnet=dict(
magnetSector=dict(
magn_num=1,
magn_width_pct=0.8,
magn_height=0.002,
magn_shape=0.0,
bridge_height=0.0,
magn_type=1,
condshaft_r=0.005,
magn_ori=2,
magn_rfe=0.0,
bridge_width=0.0,
magn_len=1.0)
),
windings=dict(
num_phases=3,
num_wires=10,
coil_span=1,
num_layers=2)
)
return femagtools.create_fsl(machine)
if __name__ == '__main__':
import os
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(message)s')
modelname = os.path.split(__file__)[-1].split('.')[0]
logger = logging.getLogger(modelname)
workdir = os.path.join(os.path.expanduser('~'), 'femag')
with open(os.path.join(workdir, modelname+'.fsl'), 'w') as f:
f.write('\n'.join(create_fsl()))
logger.info("FSL %s created",
os.path.join(workdir, modelname+'.fsl'))
|
import sys
class AutoIndent(object):
def __init__(self, stream):
self.stream = stream
self.offset = 0
self.frame_cache = {}
def indent_level(self):
i = 0
base = sys._getframe(2)
f = base.f_back
while f:
if id(f) in self.frame_cache:
i += 1
f = f.f_back
if i == 0:
# clear out the frame cache
self.frame_cache = {id(base): True}
else:
self.frame_cache[id(base)] = True
return i
def write(self, stuff):
indentation = ' ' * self.indent_level()
def indent(l):
if l:
return indentation + l
else:
return l
stuff = '\n'.join([indent(line) for line in stuff.split('\n')])
self.stream.write(stuff)
--------------------------------------------------------------------------
>>> # Example usage
>>>
>>> def f(x):
... print "f(%s)" % x
... if x == 0:
... return 0
... elif x == 1:
... return 1
... else:
... return f(x-1) + f(x-2)
>>>
>>> import sys
>>> sys.stdout = AutoIndent(sys.stdout)
>>>
>>> f(6)
f(6)
f(5)
f(4)
f(3)
f(2)
f(1)
f(0)
f(1)
f(2)
f(1)
f(0)
f(3)
f(2)
f(1)
f(0)
f(1)
f(4)
f(3)
f(2)
f(1)
f(0)
f(1)
f(2)
f(1)
f(0)
8
>>>
|
"""
Helper functions to query and send
commands to the controller.
"""
import requests
REQUESTS_TIMEOUT = 10
def status_schedule(token):
"""
Returns the json string from the Hydrawise server after calling
statusschedule.php.
    :param token: The user's API token.
:type token: string
:returns: The response from the controller. If there was an error returns
None.
:rtype: string or None
"""
url = 'https://app.hydrawise.com/api/v1/statusschedule.php'
payload = {
'api_key': token,
'hours': 168}
get_response = requests.get(url, params=payload, timeout=REQUESTS_TIMEOUT)
if get_response.status_code == 200 and \
'error_msg' not in get_response.json():
return get_response.json()
return None
def customer_details(token):
"""
Returns the json string from the Hydrawise server after calling
customerdetails.php.
    :param token: The user's API token.
:type token: string
:returns: The response from the controller. If there was an error returns
None.
:rtype: string or None.
"""
url = 'https://app.hydrawise.com/api/v1/customerdetails.php'
payload = {
'api_key': token,
'type': 'controllers'}
get_response = requests.get(url, params=payload, timeout=REQUESTS_TIMEOUT)
if get_response.status_code == 200 and \
'error_msg' not in get_response.json():
return get_response.json()
return None
def set_zones(token, action, relay=None, time=None):
"""
Controls the zone relays to turn sprinklers on and off.
    :param token: The user's API token.
:type token: string
:param action: The action to perform. Available actions are: run, runall,
stop, stopall, suspend, and suspendall.
:type action: string
:param relay: The zone to take action on. If no zone is specified then the
action will be on all zones.
:type relay: int or None
:param time: The number of seconds to run or unix epoch time to suspend.
:type time: int or None
:returns: The response from the controller. If there was an error returns
None.
:rtype: string or None
"""
# Actions must be one from this list.
action_list = [
'run', # Run a zone for an amount of time.
'runall', # Run all zones for an amount of time.
'stop', # Stop a zone.
'stopall', # stop all zones.
'suspend', # Suspend a zone for an amount of time.
'suspendall' # Suspend all zones.
]
# Was a valid action specified?
if action not in action_list:
return None
# Set the relay id if we are operating on a single relay.
if action in ['runall', 'stopall', 'suspendall']:
if relay is not None:
return None
else:
relay_cmd = ''
else:
relay_cmd = '&relay_id={}'.format(relay)
# Add a time argument if the action requires it.
if action in ['run', 'runall', 'suspend', 'suspendall']:
if time is None:
return None
else:
custom_cmd = '&custom={}'.format(time)
period_cmd = '&period_id=999'
else:
custom_cmd = ''
period_cmd = ''
# If action is on a single relay then make sure a relay is specified.
if action in ['stop', 'run', 'suspend'] and relay is None:
return None
get_response = requests.get('https://app.hydrawise.com/api/v1/'
'setzone.php?'
'&api_key={}'
'&action={}{}{}{}'
.format(token,
action,
relay_cmd,
period_cmd,
custom_cmd),
timeout=REQUESTS_TIMEOUT)
if get_response.status_code == 200 and \
'error_msg' not in get_response.json():
return get_response.json()
return None
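# A minimal usage sketch (the API key is a placeholder, not a real token):
#
#     token = 'YOUR-HYDRAWISE-API-KEY'
#     schedule = status_schedule(token)
#     details = customer_details(token)
#     result = set_zones(token, 'run', relay=1, time=60)  # run zone 1 for 60 seconds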
|
"""
A permutation of length n is an ordering of the positive integers {1,2,…,n}. For example, π=(5,3,2,1,4)
is a permutation of length 5.
Given: A positive integer n≤7.
Return: The total number of permutations of length n, followed by a list of all such permutations (in any order).
"""
from typing import Sequence
from itertools import permutations
def get_permutations(lines: Sequence[str]) -> str:
"""
:param lines: A line with n, a positive integer
:return: Number of permutations followed by permutations of numbers between 1 and n
"""
if len(lines) != 1:
raise ValueError("There must be only 1 line of input")
    line, *_ = lines
n = int(line)
if n <= 0:
raise ValueError("N must be a positive integer")
# casting to list to get the total number
permutations_ = list(permutations(range(1, n+1)))
formatted_permutations = '\n'.join([' '.join((str(number) for number in nums)) for nums in permutations_])
return f"{len(permutations_)}\n{formatted_permutations}"
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import re
import sys
import struct
from Peach.generator import Generator
from Peach import generator  # provides generator.GeneratorCompleted used below
from Peach.Generators.dictionary import *
from Peach.Generators.static import *
import Peach.Generators.static
class XmlCreateElements(Generator):
"""
    This generator creates XML elements N deep
"""
_startingDepth = 1
_increment = 1
_nodePrefix = Static('PeachFuzz')
_nodePostfix = None
    _elementAttributes = None
_currentDepth = 1
_maxDepth = 1000
def __init__(self, group, startingDepth=None, increment=None,
maxDepth=None, nodePrefix=None, nodePostfix=None,
elementAttributes=None):
"""
@type group: Group
@param group: Group to use
@type startingDepth: integer
@param startingDepth: How many deep to start at, default 1
@type increment: integer
@param increment: Incrementor, default 1
@type maxDepth: integer
@param maxDepth: Max depth, default 1000
@type nodePrefix: Generator
@param nodePrefix: Node prefix, default is Static('PeachFuzz')
@type nodePostfix: Generator
@param nodePostfix: Node postfix, default is None
@type elementAttributes: Generator
@param elementAttributes: Element attributes, default is None
"""
self.setGroup(group)
if startingDepth is not None:
self._startingDepth = startingDepth
if increment is not None:
self._increment = increment
if nodePrefix is not None:
self._nodePrefix = nodePrefix
if nodePostfix is not None:
self._nodePostfix = nodePostfix
if elementAttributes is not None:
self._elementAttributes = elementAttributes
if maxDepth is not None:
self._maxDepth = maxDepth
def next(self):
self._currentDepth += self._increment
if self._currentDepth > self._maxDepth:
raise generator.GeneratorCompleted("XmlCreateNodes")
def getRawValue(self):
ret = ''
postFixs = []
for i in range(self._currentDepth):
if self._nodePostfix is not None:
                postFixs.append(self._nodePostfix.getValue())
if self._elementAttributes is not None:
ret += "<%s%s %s>" % (self._nodePrefix.getValue(), postFixs[i],
self._elementAttributes.getValue())
else:
ret += "<%s%s>" % (self._nodePrefix.getValue(), postFixs[i])
else:
if self._elementAttributes is not None:
ret += "<%s %s>" % (self._nodePrefix.getValue(),
self._elementAttributes.getValue())
else:
ret += "<%s>" % self._nodePrefix.getValue()
for j in range(self._currentDepth):
if self._nodePostfix is not None:
ret += "</%s%s>" % (self._nodePrefix.getValue(), postFixs[i - j])
else:
ret += "</%s>" % self._nodePrefix.getValue()
return ret
def reset(self):
self._currentDepth = 1
@staticmethod
def unittest():
expected = '<PeachFuzz1><PeachFuzz2><PeachFuzz3></PeachFuzz3></PeachFuzz2></PeachFuzz1>'
g = XmlCreateNodes(1, 1)
g.next()
g.next()
g.next()
if g.getRawValue() != expected:
print("FAILURE!!! XmlCreateNodes")
class XmlCreateNodes(Generator):
"""
    This generator creates XML nodes N deep
"""
_startingDepth = 1
_increment = 1
_nodePrefix = Static('PeachFuzz')
_currentDepth = 1
_maxDepth = 1000
    def __init__(self, group, startingDepth=None, increment=None,
                 maxDepth=None, nodePrefix=None):
"""
@type group: Group
@param group: Group to use
@type startingDepth: integer
@param startingDepth: How many deep to start at, default 1
@type increment: integer
@param increment: Incrementor, default 1
@type maxDepth: integer
@param maxDepth: Max depth, default 1000
@type nodePrefix: Generator
@param nodePrefix: Node prefix, default is Static('PeachFuzz')
"""
self.setGroup(group)
if startingDepth is not None:
self._startingDepth = startingDepth
if increment is not None:
self._increment = increment
if nodePrefix is not None:
self._nodePrefix = nodePrefix
if maxDepth is not None:
self._maxDepth = maxDepth
def next(self):
self._currentDepth += self._increment
if self._currentDepth > self._maxDepth:
raise generator.GeneratorCompleted("XmlCreateNodes")
def getRawValue(self):
ret = ''
for i in range(self._currentDepth):
ret += "<%s%d>" % (self._nodePrefix.getValue(), i)
for j in range(self._currentDepth):
ret += "</%s%d>" % (self._nodePrefix.getValue(), i - j)
return ret
def reset(self):
self._currentDepth = 1
@staticmethod
def unittest():
expected = '<PeachFuzz1><PeachFuzz2><PeachFuzz3></PeachFuzz3></PeachFuzz2></PeachFuzz1>'
g = XmlCreateNodes(1, 1)
g.next()
g.next()
g.next()
if g.getRawValue() != expected:
print("FAILURE!!! XmlCreateNodes")
class XmlParserTests(Generator):
"""
W3C XML Validation Tests. This includes
all sets of tests, invalid, non-well formed, valid and error.
NOTE: Test files are in samples/xmltests.zip these are the
latest test cases from W3C as of 02/23/06 for XML.
"""
def __init__(self, group, testFiles=None):
"""
@type group: Group
@param group: Group this Generator belongs to
@type testFiles: string
@param testFiles: Location of test files
"""
Generator.__init__(self)
p = None
if not (hasattr(sys, "frozen") and sys.frozen == "console_exe"):
p = Peach.Generators.static.__file__[:-10]
else:
p = os.path.dirname(os.path.abspath(sys.executable))
testFiles = os.path.join(p, "xmltests")
self._generatorList = GeneratorList(group,
[XmlParserTestsInvalid(None, testFiles),
XmlParserTestsNotWellFormed(None, testFiles),
XmlParserTestsValid(None, testFiles)])
def getRawValue(self):
return self._generatorList.getRawValue()
def next(self):
self._generatorList.next()
class XmlParserTestsGeneric(Generator):
"""
Base class
"""
def __init__(self, group, testsFolder, testsFile):
"""
@type group: Group
@param group: Group this Generator belongs to
@type testsFolder: string
@param testsFolder: Location of test files
@type testsFile: string
@param testsFile: File with listing of test files
"""
Generator.__init__(self)
self._testsFolder = 'xmltests'
self._testsFile = 'invalid.txt'
self._currentValue = None
self._currentTestNum = 1
self._currentFilename = None
self._fdTests = None
self._fd = None
self.setGroup(group)
if testsFile is not None:
self._testsFile = testsFile
if testsFolder is not None:
self._testsFolder = testsFolder
def next(self):
if self._fdTests is None:
fileName = os.path.join(self._testsFolder, self._testsFile)
self._fdTests = open(fileName, 'rb')
self._currentFilename = os.path.join(self._testsFolder,
self._fdTests.readline())
self._currentFilename = self._currentFilename.strip("\r\n")
if len(self._currentFilename) <= len(self._testsFolder) + 2:
            raise generator.GeneratorCompleted(
                "Peach.Generators.xml.XmlParserTestsGeneric")
if self._fd is None:
self._fd = open(self._currentFilename, 'rb')
if self._fd is None:
raise Exception('Unable to open', self._currentFilename)
self._currentValue = self._fd.read()
self._fd = None
def getRawValue(self):
if self._currentValue is None:
self.next()
return self._currentValue
def reset(self):
self._fd = None
self._fdTests = None
self._currentValue = None
@staticmethod
def unittest():
pass
class XmlParserTestsInvalid(XmlParserTestsGeneric):
"""
W3C XML Validation Tests, invalid set only.
NOTE: Test files are in samples/xmltests.zip these are the
latest test cases from W3C as of 02/23/06 for XML.
"""
def __init__(self, group, testsFolder):
"""
@type group: Group
@param group: Group this Generator belongs to
@type testsFolder: string
@param testsFolder: Location of test files
"""
XmlParserTestsGeneric.__init__(self, group, testsFolder, None)
self.setGroup(group)
self._testsFile = 'invalid.txt'
if testsFolder is not None:
self._testsFolder = testsFolder
class XmlParserTestsValid(XmlParserTestsGeneric):
"""
W3C XML Validation Tests, valid set only.
NOTE: Test files are in samples/xmltests.zip these are the
latest test cases from W3C as of 02/23/06 for XML.
"""
def __init__(self, group, testsFolder):
"""
@type group: Group
@param group: Group this Generator belongs to
@type testsFolder: string
@param testsFolder: Location of test files
"""
XmlParserTestsGeneric.__init__(self, group, testsFolder, None)
self.setGroup(group)
self._testsFile = 'valid.txt'
if testsFolder is not None:
self._testsFolder = testsFolder
class XmlParserTestsError(XmlParserTestsGeneric):
"""
W3C XML Validation Tests, error set only.
NOTE: Test files are in samples/xmltests.zip these are the
latest test cases from W3C as of 02/23/06 for XML.
"""
def __init__(self, group, testsFolder):
"""
@type group: Group
@param group: Group this Generator belongs to
@type testsFolder: string
@param testsFolder: Location of test files
"""
XmlParserTestsGeneric.__init__(self, group, testsFolder, None)
self.setGroup(group)
self._testsFile = 'error.txt'
if testsFolder is not None:
self._testsFolder = testsFolder
class XmlParserTestsNotWellFormed(XmlParserTestsGeneric):
"""
    W3C XML Validation Tests, not well-formed set only.
NOTE: Test files are in samples/xmltests.zip these are the
latest test cases from W3C as of 02/23/06 for XML.
"""
def __init__(self, group, testsFolder):
"""
@type group: Group
@param group: Group this Generator belongs to
@type testsFolder: string
@param testsFolder: Location of test files
"""
XmlParserTestsGeneric.__init__(self, group, testsFolder, None)
self.setGroup(group)
self._testsFile = 'nonwf.txt'
if testsFolder is not None:
self._testsFolder = testsFolder
|
def __sort_array(a_list):
    # Simple in-place selection sort; the list is modified and also returned.
    result = a_list
    for i in range(0, len(result) - 1, 1):
        for j in range(i + 1, len(result), 1):
            if result[i] > result[j]:
                result[i], result[j] = result[j], result[i]
    return result
def find_the_pair_of_values(first_array, second_array):
    # Two-pointer scan over the two sorted arrays looking for the pair
    # (one value from each array) with the smallest absolute difference.
    __sort_array(first_array)
    __sort_array(second_array)
    minimum_value = float('inf')
    result = [None, None]
    it = 0
    jit = 0
while it < len(first_array) and jit < len(second_array):
if minimum_value > abs(first_array[it] - second_array[jit]):
result[0] = first_array[it]
result[1] = second_array[jit]
minimum_value = abs(first_array[it] - second_array[jit])
if first_array[it] < second_array[jit]:
it += 1
else:
jit += 1
return result
if __name__ == '__main__':
print(find_the_pair_of_values([4, 17, 3, 49, 9], [127, 36, 7, 220, 25, 70]))
|
# -*- coding: utf-8 -*-
import os
import re
import struct
import sys
from socket import inet_ntoa
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser
@ModuleDocstringParser
class NetFlow(BaseThreadedModule):
r"""
Netflow parser
Decode netflow packets.
source_field: Input field to decode.
target_field: Event field to be filled with the new data.
Configuration template:
- parser.NetFlow:
source_field: # <default: 'data'; type: string; is: optional>
target_field: # <default: 'data'; type: string; is: optional>
keep_original: # <default: False; type: boolean; is: optional>
receivers:
- NextModule
"""
module_type = "parser"
"""Set module type"""
NF_V5_HEADER_LENGTH = 24
NF_V5_RECORD_LENGTH = 48
TH_FIN = 0x01 # end of data
TH_SYN = 0x02 # synchronize sequence numbers
TH_RST = 0x04 # reset connection
TH_PUSH = 0x08 # push
TH_ACK = 0x10 # acknowledgment number set
TH_URG = 0x20 # urgent pointer set
TH_ECE = 0x40 # ECN echo, RFC 3168
TH_CWR = 0x80 # congestion window reduced
IP_PROTOCOLS = {}
# Helper functions
def readProtocolInfo(self):
path = "%s/../assets/ip_protocols" % os.path.dirname(os.path.realpath(__file__))
        r = re.compile(r"(?P<proto>\S+)\s+(?P<num>\d+)")
with open(path, 'r') as f:
for line in f:
m = r.match(line)
if not m:
continue
NetFlow.IP_PROTOCOLS[int(m.group("num"))] = m.group("proto")
def configure(self, configuration):
# Call parent configure method
BaseThreadedModule.configure(self, configuration)
self.source_field = self.getConfigurationValue('source_field')
self.target_field = self.getConfigurationValue('target_field')
self.drop_original = not self.getConfigurationValue('keep_original')
self.readProtocolInfo()
    def getTcpFlags(self, flags):
ret = []
if flags & NetFlow.TH_FIN:
ret.append('FIN')
if flags & NetFlow.TH_SYN:
ret.append('SYN')
if flags & NetFlow.TH_RST:
ret.append('RST')
if flags & NetFlow.TH_PUSH:
ret.append('PUSH')
if flags & NetFlow.TH_ACK:
            ret.append('ACK')
if flags & NetFlow.TH_URG:
ret.append('URG')
if flags & NetFlow.TH_ECE:
ret.append('ECE')
if flags & NetFlow.TH_CWR:
ret.append('CWR')
return ret
def decodeVersion5(self, raw_nf_data, record_count):
nf_data = {}
(nf_data['sys_uptime'], nf_data['unix_secs'], nf_data['unix_nsecs'], nf_data['flow_sequence'], nf_data['engine_type'], nf_data['engine_id'], nf_data['sampling_interval']) = struct.unpack('!IIIIBBH', raw_nf_data[4:24])
for i in range(0, record_count):
record_starts_at = NetFlow.NF_V5_HEADER_LENGTH + (i * NetFlow.NF_V5_RECORD_LENGTH)
record = raw_nf_data[record_starts_at:record_starts_at+NetFlow.NF_V5_RECORD_LENGTH]
# Decode record, except src and dest addresses.
decoded_record = struct.unpack('!HHIIIIHHBBBBHHBBH', record[12:])
nf_data['srcaddr'] = inet_ntoa(record[:4])
nf_data['dstaddr'] = inet_ntoa(record[4:8])
nf_data['nexthop'] = inet_ntoa(record[8:12])
nf_data['snmp_index_in_interface'] = decoded_record[0]
nf_data['snmp_index_out_interface'] = decoded_record[1]
nf_data['packet_count'] = decoded_record[2]
nf_data['byte_count'] = decoded_record[3]
nf_data['uptime_start'] = decoded_record[4]
nf_data['uptime_end'] = decoded_record[5]
nf_data['srcport'] = decoded_record[6]
nf_data['dstport'] = decoded_record[7]
nf_data['tcp_flags_binary'] = decoded_record[9]
            nf_data['tcp_flags'] = self.getTcpFlags(decoded_record[9])
            nf_data['prot'] = decoded_record[10]
            nf_data['prot_name'] = NetFlow.IP_PROTOCOLS.get(decoded_record[10], 'unknown')
nf_data['tos'] = decoded_record[11]
nf_data['src_as'] = decoded_record[12]
nf_data['dst_as'] = decoded_record[13]
nf_data['src_mask'] = decoded_record[14]
nf_data['dst_mask'] = decoded_record[15]
            # Yield a copy so each record gets its own dict instead of a
            # reference to the one being mutated on the next iteration.
            yield dict(nf_data)
def handleEvent(self, event):
if self.source_field not in event:
yield event
return
raw_nf_data = event[self.source_field]
# Try to get netflow version.
try:
(version, record_count) = struct.unpack('!HH', raw_nf_data[0:4])
        except Exception:
            etype, evalue, etb = sys.exc_info()
            self.logger.warning("Could not detect netflow version: %s. Exception: %s, Error: %s." % (raw_nf_data, etype, evalue))
            yield event
            return
# Call decoder for detected version.
try:
decoder_func = getattr(self, "decodeVersion%s" % version)
        except AttributeError:
            etype, evalue, etb = sys.exc_info()
            self.logger.error("Netflow parser does not implement decoder for netflow version: %s. Exception: %s, Error: %s" % (version, etype, evalue))
            self.lumbermill.shutDown()
            return
copy_event = False
for netflow_data in decoder_func(raw_nf_data, record_count):
if copy_event:
event = event.copy()
copy_event = True
if self.drop_original and self.source_field is not self.target_field:
event.pop(self.source_field, None)
event[self.target_field] = netflow_data
event['lumbermill']['event_type'] = "NetFlowV%s" % version
yield event
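# --- illustration only, not part of the module --------------------------------
# A minimal sketch of how a NetFlow v5 datagram is laid out, matching the
# struct formats used in decodeVersion5() above. All numeric values and
# addresses below are made up for demonstration.
if __name__ == '__main__':
    from socket import inet_aton
    # 24-byte header: version, record count, sys_uptime, unix_secs, unix_nsecs,
    # flow_sequence, engine_type, engine_id, sampling_interval.
    header = struct.pack('!HHIIIIBBH', 5, 1, 100, 1600000000, 0, 1, 0, 0, 0)
    # 48-byte record: three IPv4 addresses followed by the remaining fields.
    record = inet_aton('10.0.0.1') + inet_aton('10.0.0.2') + inet_aton('10.0.0.254') \
        + struct.pack('!HHIIIIHHBBBBHHBBH', 1, 2, 10, 840, 100, 200,
                      12345, 80, 0, NetFlow.TH_SYN | NetFlow.TH_ACK, 6, 0,
                      65000, 65001, 24, 24, 0)
    datagram = header + record
    version, record_count = struct.unpack('!HH', datagram[0:4])
    print(version, record_count)  # -> 5 1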
|
class FilterModule(object):
def filters(self):
return {
'get_zone_interfaces': self.get_zone_interfaces,
}
    def get_zone_interfaces(self, zones):
        dmz_lab_interfaces = list()
        home_interfaces = list()
        internet_interfaces = list()
        # Unwrap the zone list whether or not the reply is nested inside
        # multi-routing-engine-results; with neither key there is nothing to report.
        if 'multi-routing-engine-results' in zones:
            zone_information = zones['multi-routing-engine-results']['multi-routing-engine-item']['zones-information']['zones-security']
        elif 'zones-information' in zones:
            zone_information = zones['zones-information']['zones-security']
        else:
            return None
        # Map each zone of interest to the list collecting its interfaces.
        zone_targets = {
            'DMZ_LAB': dmz_lab_interfaces,
            'HOME': home_interfaces,
            'INTERNET': internet_interfaces,
        }
        if isinstance(zone_information, list):
            for each in zone_information:
                zone_name = each['zones-security-zonename']
                if zone_name not in zone_targets:
                    continue
                interfaces = each['zones-security-interfaces']['zones-security-interface-name']
                if isinstance(interfaces, list):
                    zone_targets[zone_name].extend(interfaces)
                else:
                    zone_targets[zone_name].append(interfaces)
        zone_ifaces = dict()
        zone_ifaces['dmz_lab_interfaces'] = dmz_lab_interfaces
        zone_ifaces['home_interfaces'] = home_interfaces
        zone_ifaces['internet_interfaces'] = internet_interfaces
        return zone_ifaces
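# --- illustration only, not part of the plugin ---------------------------------
# A quick sketch of the input shape this filter expects; the interface names
# below are made up. Run directly to see the grouped output.
if __name__ == '__main__':
    sample = {
        'zones-information': {
            'zones-security': [
                {'zones-security-zonename': 'HOME',
                 'zones-security-interfaces': {
                     'zones-security-interface-name': ['ge-0/0/1.0', 'ge-0/0/2.0']}},
                {'zones-security-zonename': 'INTERNET',
                 'zones-security-interfaces': {
                     'zones-security-interface-name': 'ge-0/0/0.0'}},
            ]
        }
    }
    print(FilterModule().get_zone_interfaces(sample))
    # -> {'dmz_lab_interfaces': [], 'home_interfaces': ['ge-0/0/1.0', 'ge-0/0/2.0'],
    #     'internet_interfaces': ['ge-0/0/0.0']}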
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
# Standard library imports
import time
# Local imports
from spyder.plugins.preferences.api import PreferencePages
from spyder.plugins.maininterpreter.plugin import MainInterpreter
from spyder.plugins.preferences.tests.conftest import MainWindowMock
from spyder.utils.conda import get_list_conda_envs
from spyder.utils.pyenv import get_list_pyenv_envs
# Get envs to show them in the Main interpreter page. This is actually
# done in a thread in the InterpreterStatus widget.
# We also record the time needed to get them so we can compare it with the
# loading time of that config page.
t0 = time.time()
get_list_conda_envs()
get_list_pyenv_envs()
GET_ENVS_TIME = time.time() - t0
def test_load_time(qtbot):
from spyder.plugins.maininterpreter.confpage import (
MainInterpreterConfigPage)
# Create Preferences dialog
main = MainWindowMock()
preferences = main.preferences
preferences.config_pages.pop(PreferencePages.General)
main_interpreter = MainInterpreter(main)
main.register_plugin(main_interpreter)
# Create page and measure time to do it
t0 = time.time()
preferences.open_dialog(None)
load_time = time.time() - t0
container = preferences.get_container()
dlg = container.dialog
widget = dlg.get_page()
# Assert the combobox is populated with the found envs
assert widget.cus_exec_combo.combobox.count() > 0
# Assert load time is smaller than the one required to get envs
# directly. This means we're using the cached envs instead
assert load_time < GET_ENVS_TIME
# Load time should be small too because we perform simple validations
# on the page.
assert load_time < 0.5
|
import serial
# Serial port configuration
ser = serial.Serial()
ser.port = 'COM3'
ser.baudrate = 4800
ser.open()
# Send data continuously
while ser.isOpen():
#print(ser.portstr) # check which port was really used
ser.write(bytes("150", 'ascii')) # write a string
#ser.close() # close port
|
# Copyright (c) 2020, 2021, Oracle and/or its affiliates.
#
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
#
from utils import auxutil
from setup import defaults
class Config:
version_tag = defaults.VERSION_TAG
min_supported_version = defaults.MIN_SUPPORTED_VERSION
max_supported_version = defaults.MAX_SUPPORTED_VERSION
image_registry = defaults.IMAGE_REGISTRY
image_repository = defaults.IMAGE_REPOSITORY
image_registry_host = ""
image_registry_port = ""
image_registry_is_loopback = ""
operator_image_name = defaults.OPERATOR_IMAGE_NAME
operator_ee_image_name = defaults.OPERATOR_EE_IMAGE_NAME
operator_version_tag = defaults.OPERATOR_VERSION_TAG
operator_pull_policy = defaults.OPERATOR_PULL_POLICY
operator_gr_ip_whitelist = defaults.OPERATOR_GR_IP_WHITELIST
# server
server_version_tag = defaults.SERVER_VERSION_TAG
server_image_name = defaults.SERVER_IMAGE_NAME
server_ee_image_name = defaults.SERVER_EE_IMAGE_NAME
# router
router_version_tag = defaults.ROUTER_VERSION_TAG
router_image_name = defaults.ROUTER_IMAGE_NAME
router_ee_image_name = defaults.ROUTER_EE_IMAGE_NAME
# oci
oci_skip = defaults.OCI_SKIP
oci_backup_apikey_path = defaults.OCI_BACKUP_APIKEY_PATH
oci_restore_apikey_path = defaults.OCI_RESTORE_APIKEY_PATH
oci_backup_bucket = defaults.OCI_BACKUP_BUCKET
@property
def operator_shell_version_num(self):
a,b,c = self.operator_version_tag.split("-")[0].split(".")
return int(a)*10000 + int(b)*100 + int(c)
def commit(self):
if self.image_registry:
self.image_registry_host, self.image_registry_port, self.image_registry_is_loopback = auxutil.resolve_registry_url(self.image_registry)
def get_old_version_tag(self):
return self.min_supported_version
def get_image_registry_repository(self):
if self.image_registry:
if self.image_repository:
return self.image_registry + "/" + self.image_repository
else:
return self.image_registry
else:
return self.image_repository
def get_operator_image(self):
return f"{self.get_image_registry_repository()}/{self.operator_image_name}:{self.operator_version_tag}"
def get_server_image(self, version=None):
return f"{self.get_image_registry_repository()}/{self.server_image_name}:{version if version else self.version_tag}"
def get_old_server_image(self):
return self.get_server_image(self.get_old_version_tag())
def get_router_image(self):
return f"{self.get_image_registry_repository()}/{self.router_image_name}:{self.version_tag}"
# test-suite configuration
g_ts_cfg = Config()
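# --- illustration only, not part of the test suite -----------------------------
# A small sketch of how the version/image helpers compose; the tag and registry
# values below are made up and do not correspond to real releases.
if __name__ == '__main__':
    cfg = Config()
    cfg.operator_version_tag = "8.0.25-2.0.1"
    # 8 * 10000 + 0 * 100 + 25 = 80025
    print(cfg.operator_shell_version_num)
    cfg.image_registry = "registry.example.com:5000"
    cfg.image_repository = "mysql"
    print(cfg.get_image_registry_repository())  # -> registry.example.com:5000/mysql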
|
n = int(input())
for _ in range(n):
    c = 0
    linha = input()
    esquerda = []
    # Count matched '<' '>' pairs: push each '<' and pop one on every '>'.
    for ch in linha:
        if ch == '<':
            esquerda.append(ch)
        elif ch == '>' and esquerda:
            esquerda.pop()
            c += 1
    print(c)
|
# Copyright (c) 2015,
# Philipp Hertweck
#
# This code is provided under the BSD 2-Clause License.
# Please refer to the LICENSE.txt file for further information.
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
import unittest
from testing import integration_test as integration_test
from testing.tcpdump.Packet import Packet
# Network used in this test
from networks import sample_network as network
class TestPacketAnalyzer(integration_test.IntegrationTestCase):
def setUp(self):
# TestConfiguration
self.CONTROLLER_PATH = '../../controller/routing_switch.py'
self.NETWORK = network.Network
super(TestPacketAnalyzer, self).setUp()
def test_received_packets(self):
self.analyze_packets(["h1","h2"], lambda: self.net.pingAll())
self.assertPacketIn("Host 1 received a packet", "h1")
def test_received_eth_packet(self):
host1, host2 = self.net.get("h1", "h2")
self.analyze_packets(["h1","h2"], lambda: self.net.ping((host1, host2)))
# Analyze packets on h1
packet = Packet()
packet.eth_src = "cc:cc:cc:cc:cc:01"
packet.eth_dst = "cc:cc:cc:cc:cc:02"
self.assertReceivedPacket(packet, "Received eth packet", "h1")
packet1 = Packet()
packet1.eth_src = "aa:aa:aa:aa:aa:aa"
packet1.eth_dst = "cc:cc:cc:cc:cc:02"
self.assertNotReceivedPacket(packet1, "Not received packet from switch.", "h1")
# Analyze packets on h2
packet3 = Packet()
packet3.eth_src = "cc:cc:cc:cc:cc:01"
packet3.eth_dst = "cc:cc:cc:cc:cc:02"
self.assertReceivedPacket(packet3, "Received eth packet", "h2")
packet4 = Packet()
packet4.eth_src = "aa:aa:aa:aa:aa:aa"
packet4.eth_dst = "cc:cc:cc:cc:cc:02"
self.assertNotReceivedPacket(packet4, "Not received packet from switch.", "h2")
if __name__ == '__main__':
unittest.main()
|
from setuptools import setup
install_requires = [
# Pin deps for now, to be upgraded after tests are much expanded.
'Genshi==0.7',
'lxml==3.6.0',
'Pygments==1.4',
'python-dateutil==2.5.3',
'Twisted[tls]==16.4.0',
'klein',
'treq',
'attrs',
]
extras_require = {
'testing': [
'coverage',
'ddt',
],
}
setup(
name='infobob',
version='0.1.0-dev',
author='habnabit',
author_email='_@habnab.it',
maintainer='Colin Dunklau',
maintainer_email='colin.dunklau@gmail.com',
packages=['infobob', 'infobob.tests', 'twisted.plugins'],
include_package_data=True,
install_requires=install_requires,
extras_require=extras_require,
)
|
import numpy as np
class Settings:
"""
Class for different program settings. At the start these settings are loaded from predefined file.
"""
def __init__(self):
"""
This function has different properties:
Attributes
----------
self.derivative_dx : float
Increment size for derivative calculation.
self.formalism : str
Possible two values. Palatini or Metric, by default None.
"""
self.formalism = "Metric"
self.scalar_field_range = [0, 10]
self.scalar_field_step = 1e-6
self.scalar_field_step_plot = 1e-6
self.scalar_field_domain = np.arange(
self.scalar_field_range[0], self.scalar_field_range[1]+self.scalar_field_step, self.scalar_field_step)
self.scalar_field_domain_plot = np.arange(
self.scalar_field_range[0], self.scalar_field_range[1]+self.scalar_field_step_plot, self.scalar_field_step_plot)
self.N_values = [50, 60]
self.N_range = [0, 100]
self.N_step = 0.1
self.N_list = np.unique(
np.concatenate(
(np.arange(self.N_range[0], self.N_range[1]+self.N_step, self.N_step),
self.N_range[1] +
np.exp(np.arange(0, 10+self.N_step, self.N_step)),
self.N_values)
)
)
# Numerical calculation
self.derivative_dx = 1e-6
#
self.root_precision = 1e-4
self.simplify = False
def create_interval_list(self):
return np.arange(self.scalar_field_range[0], self.scalar_field_range[1]+self.scalar_field_step, self.scalar_field_step)
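# --- illustration only ----------------------------------------------------------
# Standalone sketch of the N_list construction above, using made-up, smaller
# ranges so the resulting grid is easy to inspect by eye.
if __name__ == '__main__':
    demo_values = [5, 6]
    demo_range = [0, 10]
    demo_step = 2
    demo_list = np.unique(np.concatenate((
        np.arange(demo_range[0], demo_range[1] + demo_step, demo_step),
        demo_range[1] + np.exp(np.arange(0, 4 + demo_step, demo_step)),
        demo_values)))
    # Linear grid, then an exponential tail past the range end, plus the target values.
    print(demo_list)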
|
import flax.linen as nn
import jax.numpy as jnp
from .config import Config
from .wavenet import WaveNetBlock
class DiffWave(nn.Module):
"""DiffWave: A Versatile Diffusion Model for Audio Synthesis.
"""
config: Config
def setup(self):
"""Setup modules.
"""
config = self.config
# [F], fourier coefficient
self.fcoeff = 2 ** jnp.array(config.fourier)
# signal proj
self.proj = nn.Dense(config.channels)
# embedding
self.proj_embed = [
nn.Dense(config.embedding_proj)
for _ in range(config.embedding_layers)]
# mel-upsampler
self.upsample = [
nn.ConvTranspose(
1,
config.upsample_kernels,
config.upsample_strides,
padding='SAME')
for _ in range(config.upsample_layers)]
# wavenet blocks
self.blocks = [
WaveNetBlock(
channels=config.channels,
kernels=config.kernels,
dilations=config.dilations ** i)
for _ in range(config.num_cycles)
for i in range(config.num_layers)]
self.scale = config.num_layers ** -0.5
# output projection
self.proj_context = nn.Dense(config.channels)
self.proj_out = nn.Dense(1)
def __call__(self,
signal: jnp.ndarray,
snr: jnp.ndarray,
mel: jnp.ndarray) -> jnp.ndarray:
"""Estimate noise from signal with respect to given snr and mel-spectrogram.
Args:
signal: [float32; [B, T]], noised signal.
snr: [float32; [B]], normalized signal-to-noise ratio.
mel: [float32; [B, T // H, M]], mel-spectrogram.
Returns:
[float32; [B, T]], estimated noise.
"""
# [B, T, F], fourier features
x = self.fcoeff * signal[..., None] * jnp.pi
# [B, T, F x 2 + 1]
x = jnp.concatenate([signal[..., None], jnp.sin(x), jnp.cos(x)], axis=-1)
# [B, T, C]
x = nn.swish(self.proj(x))
# [B, E']
embed = self.embedding(snr)
# [B, E]
for proj in self.proj_embed:
embed = nn.swish(proj(embed))
# [B, T // H, M, 1]
mel = mel[..., None]
for upsample in self.upsample:
mel = nn.swish(upsample(mel))
# [B, T, M]
mel = mel.squeeze(-1)
# WaveNet
context = 0.
for block in self.blocks:
# [B, T, C], [B, T, C]
x, skip = block(x, embed, mel)
context = context + skip
# [B, T, C]
context = nn.swish(self.proj_context(context * self.scale))
# [B, T]
return nn.tanh(self.proj_out(context)).squeeze(-1)
def embedding(self, snr: jnp.ndarray) -> jnp.ndarray:
"""Generate embedding.
Args:
snr: [float32; [B]], unit normalized signal-to-noise ratio.
Returns:
[float32; [B, E]], embeddings.
"""
# [E // 2]
i = jnp.arange(0, self.config.embedding_size, 2)
# [E // 2]
denom = jnp.exp(-jnp.log(10000) * i / self.config.embedding_size)
# [B, E // 2]
context = snr[:, None] * denom[None] * self.config.embedding_factor
# [B, E // 2, 2]
pe = jnp.stack([jnp.sin(context), jnp.cos(context)], axis=-1)
# [B, E]
return pe.reshape(-1, self.config.embedding_size)
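# --- illustration only, not part of the model -----------------------------------
# A standalone sketch of the sinusoidal SNR embedding implemented in
# DiffWave.embedding above, with made-up sizes (embedding_size=8, factor=1.0)
# so it can be exercised without a Config object.
if __name__ == '__main__':
    snr = jnp.array([0.25, 0.75])
    size, factor = 8, 1.0
    i = jnp.arange(0, size, 2)
    denom = jnp.exp(-jnp.log(10000) * i / size)
    context = snr[:, None] * denom[None] * factor
    pe = jnp.stack([jnp.sin(context), jnp.cos(context)], axis=-1)
    print(pe.reshape(-1, size).shape)  # -> (2, 8)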
|
#!/usr/bin/env python3
# ==============================================================
# author: Lars Gabriel
#
# runEVM.py: Run EVM for a set of partitions
# ==============================================================
import argparse
import subprocess as sp
import multiprocessing as mp
import os
import csv
import sys
class FileMissing(Exception):
pass
evm = ''
bin = os.path.dirname(os.path.realpath(__file__))
workdir = ''
partition_list = []
weights = ''
threads = 1
def main():
global evm, workdir, partition_list, weights, bin, threads
args = parseCmd()
workdir = os.path.abspath('{}/EVM/{}/'.format(args.species_dir, args.test_level))
evm = os.path.abspath(args.evm_path)
threads = args.threads
# read partition lists
partition_list_path = '{}/partitions/part_test.lst'.format(workdir)
with open(partition_list_path, 'r') as file:
part = csv.reader(file, delimiter='\t')
for p in part:
partition_list.append(p)
# Check if weight file exists
weights = '{}/EVM.weights.tab'.format(workdir)
if not os.path.exists(weights):
raise FileMissing('Weight file is missing at: {}'.format(weights))
'''
for part in partition_list:
prediction(part[3], part[0])
'''
    # Run EVM predictions
job_results = []
pool = mp.Pool(threads)
for part in partition_list:
r = pool.apply_async(prediction, (part[3], part[0]))
job_results.append(r)
for r in job_results:
r.wait()
pool.close()
pool.join()
def prediction(exec_dir, contig):
    # Make an EVM prediction for one partition
part_name = exec_dir.split('/')[-1]
start = int(part_name.split('_')[1].split('-')[0])
chr = part_name.split('_')[0]
# Run EVM
evm_out = '{}/evm.out'.format(exec_dir)
evm_cmd = '{}/evidence_modeler.pl -G genome.fasta.masked -g gene_set.gff'.format(evm) \
+ ' -w {} -e evm_pasa.gff -p evm_protein.gff --exec_dir {} > {} 2> {}.log'.format(\
weights, exec_dir, evm_out, evm_out)
q = sp.Popen(evm_cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = q.communicate()
if stderr.decode():
sys.stderr.write('Error in {} with: {}'.format(evm_cmd, stderr.decode()))
# check if EVM predicted at least one gene and convert evm.out to gff and gtf format
    if not os.stat(evm_out).st_size == 0:
        gff_out = '{}/evm.gff'.format(exec_dir)
        cmd = '{}/EvmUtils/EVM_to_GFF3.pl {} {} > {}'.format(evm, evm_out, contig, gff_out)
        sp.call(cmd, shell=True)
        gtf_out = '{}/evm.gtf'.format(exec_dir)
        cmd = '{}/gff32gtf.py --gff {} --out {}'.format(bin, gff_out, gtf_out)
        sp.call(cmd, shell=True)
def parseCmd():
"""Parse command line arguments
Returns:
dictionary: Dictionary with arguments
"""
parser = argparse.ArgumentParser(description='Run EVM for a set of partitions')
parser.add_argument('--species_dir', type=str,
help='Directory containing the results of TSEBRA-experiment 1 for one species')
parser.add_argument('--test_level', type=str,
help='One of "species_excluded", "family_excluded" or "order_excluded"')
parser.add_argument('--evm_path', type=str,
help='Path to the directory where EVidenceModeler is installed')
    parser.add_argument('--threads', type=int,
                        help='Number of worker processes used to run EVM partitions in parallel')
return parser.parse_args()
if __name__ == '__main__':
main()
|
import regex as re
def remove_feminine_infl(text):
    # Strip German feminine inflection suffixes such as "/in" or "/n".
    text = re.sub(r'(.*)/in(?!\w)', r'\1', text)
    text = re.sub(r'(.*)/n(?!\w)', r'\1', text)
    return text
def remove_article(text):
    # Drop a leading article unless it starts a fixed phrase like "ein für alle" or "ein paar".
    if not re.search(r'^ein\s+((für alle)|(paar))', text):
        text = re.sub(r'^(?:die|der|das|dem|den|ein|eine|einen|einem)\s+', '', text)
    return text
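# --- illustration only -----------------------------------------------------------
# Quick usage sketch with made-up inputs showing what the two helpers strip.
if __name__ == '__main__':
    print(remove_feminine_infl('Lehrer/in'))     # -> 'Lehrer'
    print(remove_feminine_infl('Kolleg/n'))      # -> 'Kolleg'
    print(remove_article('der Lehrer'))          # -> 'Lehrer'
    print(remove_article('ein paar Leute'))      # left unchanged (fixed phrase)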
|
from typing import Text
from ..request import get
from ..Model.ApiModel import Channel
from pydantic import BaseModel
class all_channel(BaseModel):
ch:list[Channel]
def get_channel(channel_id:Text) -> Channel:
"""获取子频道信息"""
channel = get(f"/channels/{channel_id}")
channel_info = Channel(**channel)
return channel_info
def get_guilds_all_channel(guild_id:Text) -> list[Channel]:
"""获取频道下的子频道列表"""
channel = get(f"/guilds/{guild_id}/channels")
channel_info = all_channel(**{"ch":channel})
return channel_info.ch
|
# Standard lib imports
import logging
# Third party imports
# None
# Project level imports
# None
log = logging.getLogger(__name__)
class UserInfo(object):
def __init__(self, connection):
"""
Initialize a new instance
"""
self.conn = connection
def whoami(self):
"""
Example JSON result from the API:
{
u'common_name': u'ecsadmin@internal',
u'distinguished_name': u'',
u'namespace': u'',
u'roles': [
u'SYSTEM_ADMIN'
]
}
"""
log.info('Getting my own user info (whoami)')
return self.conn.get('user/whoami')
|
class Solution(object):
    def thirdMax(self, nums):
        # Track the three largest distinct values seen so far (n1 >= n2 >= n3).
        n1 = n2 = n3 = None
for e in nums:
if e != n1 and e != n2 and e != n3:
if n1 is None or e > n1:
n1, n2, n3 = e, n1, n2
elif n2 is None or e > n2:
n2, n3 = e, n2
elif n3 is None or e > n3:
n3 = e
return n1 if n3 is None else n3
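# --- illustration only -------------------------------------------------------
# Quick checks with made-up inputs: the third distinct maximum is returned,
# or the overall maximum when fewer than three distinct values exist.
if __name__ == '__main__':
    s = Solution()
    print(s.thirdMax([3, 2, 1]))     # -> 1
    print(s.thirdMax([1, 2]))        # -> 2
    print(s.thirdMax([2, 2, 3, 1]))  # -> 1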
|
import http
from flask import request
from flask_restful import Resource, reqparse
from models import User
from utils.decorators import api_response_wrapper
from utils.rate_limit import rate_limit
from .login_service import generate_jwt_tokens
parser = reqparse.RequestParser()
parser.add_argument("email", help="This field cannot be blank", required=True)
parser.add_argument("password", help="This field cannot be blank", required=True)
class UserLogin(Resource):
@rate_limit()
@api_response_wrapper()
def post(self):
"""
Login method for users
---
tags:
- user
parameters:
- in: body
name: body
schema:
id: UserLogin
required:
- email
- password
properties:
email:
type: string
description: The user's email.
default: "JohnDoe@mail.ru"
password:
type: string
description: The user's password.
default: "Qwerty123"
responses:
200:
description: Success user's login
schema:
properties:
success:
type: boolean
description: Response status
default: True
data:
type: array
description: Response data
items:
type: object
properties:
access_token:
type: string
refresh_token:
type: string
message:
type: string
description: Response message
400:
description: Bad request response
schema:
properties:
success:
type: boolean
description: Response status
default: False
data:
type: array
description: Response data
items:
type: object
default: ...
default: []
message:
type: string
description: Response message
429:
description: Too many requests. Limit in interval seconds.
"""
data = parser.parse_args()
email: str = data.get("email")
current_user = User.find_by_email(email=email)
if not current_user:
return {"message": f"User {email} doesn't exist"}, http.HTTPStatus.NOT_FOUND
if current_user.check_password(password=data.get("password")):
jwt_tokens: dict[str, str] = generate_jwt_tokens(
current_user=current_user, request=request
)
return {
"message": f"Logged in as {current_user.username}",
"access_token": jwt_tokens.get("access_token"),
"refresh_token": jwt_tokens.get("refresh_token"),
}, http.HTTPStatus.OK
return {"message": "Wrong credentials"}, http.HTTPStatus.BAD_REQUEST
|
from numba import jit
import math
import numpy as _np
from netket import random as _random
class _HamiltonianKernel:
def __init__(self, hamiltonian):
self._hamiltonian = hamiltonian
self._sections = _np.empty(1, dtype=_np.int32)
self._hamconn = self._hamiltonian.get_conn_flattened
self._n_conn = self._hamiltonian.n_conn
self._hilbert = hamiltonian.hilbert
def transition(self, state, state_1, log_prob_corr):
        sections = _np.empty(state.shape[0], dtype=_np.int32)
vprimes = self._hamconn(state, sections)[0]
self._choose(vprimes, sections, state_1, log_prob_corr)
self._n_conn(state_1, sections)
log_prob_corr -= _np.log(sections)
def random_state(self, state):
for i in range(state.shape[0]):
self._hilbert.random_state(out=state[i])
@staticmethod
@jit(nopython=True)
def _choose(states, sections, out, w):
low_range = 0
for i, s in enumerate(sections):
n_rand = _random.randint(low_range, s, size=())
out[i] = states[n_rand]
w[i] = math.log(s - low_range)
low_range = s
|
import pytest
from data_structures.heap import Heap
@pytest.fixture
def base_heap():
heap = Heap()
heap.push(1)
heap.push(2)
heap.push(3)
heap.push(4)
heap.push(5)
return heap
def test_heap_init():
basic_heap = Heap()
init_list_heap = Heap([9, 8, 7, 5, 1, 2])
assert isinstance(basic_heap, Heap)
assert isinstance(init_list_heap, Heap)
def test_heap_push():
heap = Heap()
heap.push(2)
heap.push(3)
heap.push(1)
def test_heap_pop(base_heap):
assert base_heap.pop() == 1
assert base_heap.pop() == 2
def test_heap_peek(base_heap):
assert base_heap.peek() == 1
def test_heap_empty():
heap = Heap()
assert heap.empty()
heap.push(1)
assert not heap.empty()
def test_heapify_up_and_down(base_heap):
base_heap.pop()
base_heap.pop()
base_heap.push(8)
base_heap.push(1)
base_heap.push(0)
base_heap.push(9)
assert base_heap.get_heap() == [0, 3, 1, 8, 4, 5, 9]
def test_heapify():
heap = Heap([8, 9, 5, 1, 3, 2, 0, 6])
assert heap.get_heap() == [0, 1, 2, 6, 3, 8, 5, 9]
|
import predict
import cv2
import base64
import json
def make_json(img, happy, confidence):
    # Object that will hold the base64-encoded image string
    encoded_string = None
    # Save the cropped image to disk
    cv2.imwrite('Cropped_image.jpg', img)
    # Re-open the image file and base64-encode its bytes
    # (re-reading from disk is a workaround, but it works)
    with open("Cropped_image.jpg", "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read())
    # print("length check")
    # print(len(encoded_string))
    # print(type(encoded_string))
    # print(encoded_string[:100])
    send_msg_dict = {'value': happy, 'confidence': confidence,
                     'Image': str(encoded_string)}
    json_msg = json.dumps(send_msg_dict)
print(happy, confidence)
cv2.imshow('Image view', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
return json_msg
# Receive the cropped image, the state, and the fatigue-measurement confidence into img, happy, confidence.
img,happy,confidence = predict.starting('image.jpg')
json_msg = make_json(img,happy,confidence)
print(type(json_msg),len(json_msg))
|
import CGIHTTPServer,BaseHTTPServer
server_address = ('', 8000)
handler = CGIHTTPServer.CGIHTTPRequestHandler
httpd = BaseHTTPServer.HTTPServer(server_address, handler)
print("server running on port %s" %server_address[1])
httpd.serve_forever()
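# Note: CGIHTTPServer/BaseHTTPServer are Python 2 modules. A rough Python 3
# equivalent (sketch only, not exercised here) would be:
#   from http.server import HTTPServer, CGIHTTPRequestHandler
#   httpd = HTTPServer(('', 8000), CGIHTTPRequestHandler)
#   print("server running on port %s" % 8000)
#   httpd.serve_forever()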
|
# -*- coding: utf-8 -*-
tipos = int(input())
quantidades = list(map(int, input().split()))
print(min(quantidades))
|
import torch
import torch.nn as nn
def lerp_nn(source: nn.Module, target: nn.Module, tau: float):
for t, s in zip(target.parameters(), source.parameters()):
t.data.copy_(t.data * (1. - tau) + s.data * tau)
def get_huber_loss(bellman_errors, kappa=1):
be_abs = bellman_errors.abs()
huber_loss_1 = (be_abs <= kappa).float() * 0.5 * bellman_errors ** 2
huber_loss_2 = (be_abs > kappa).float() * kappa * (be_abs - 0.5 * kappa)
return huber_loss_1 + huber_loss_2
def flat_grads(params):
return torch.cat(
[p.grad.data.flatten() for p in params if p.grad is not None])
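# --- illustration only ------------------------------------------------------------
# Quick sanity check of get_huber_loss with made-up values: the loss is quadratic
# for |error| <= kappa and linear beyond it.
if __name__ == '__main__':
    bellman_errors = torch.tensor([0.5, 2.0])
    print(get_huber_loss(bellman_errors))  # -> tensor([0.1250, 1.5000])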
|
from app.drivers.mslookup import base
from app.actions.mslookup import biosets as lookups
from app.drivers.options import mslookup_options
class BioSetLookupDriver(base.LookupDriver):
outsuffix = '_setlookup.sqlite'
lookuptype = 'biosets'
command = 'biosets'
commandhelp = ('Create SQLite lookup of mzML input files and '
'biological set names. Input files are passed to -i, '
'respective set names are passed to --setnames.')
def __init__(self):
super().__init__()
self.infiletype = 'mzML spectra'
def set_options(self):
super().set_options()
del(self.options['--dbfile'])
self.options.update(self.define_options(['setnames'],
mslookup_options))
def parse_input(self, **kwargs):
super().parse_input(**kwargs)
self.lookupfn = None
self.setnames = [x.replace('"', '') for x in self.setnames]
def create_lookup(self):
lookups.create_bioset_lookup(self.lookup, self.fn, self.setnames)
|
import asyncio
import base64
import hashlib
import hmac
from http.cookies import SimpleCookie
import json
import urllib.request
SALT = "datasette-auth-github"
class BadSignature(Exception):
pass
class Signer:
def __init__(self, secret):
self.secret = secret
def signature(self, value):
return (
base64.urlsafe_b64encode(salted_hmac(SALT, value, self.secret).digest())
.strip(b"=")
.decode()
)
def sign(self, value):
return "{}:{}".format(value, self.signature(value))
def unsign(self, signed_value):
if ":" not in signed_value:
raise BadSignature("No : found")
value, signature = signed_value.rsplit(":", 1)
if hmac.compare_digest(signature, self.signature(value)):
return value
raise BadSignature("Signature does not match")
async def send_html(send, html, status=200, headers=None):
headers = headers or []
if "content-type" not in [h.lower() for h, v in headers]:
headers.append(["content-type", "text/html; charset=UTF-8"])
await send(
{
"type": "http.response.start",
"status": status,
"headers": [
[key.encode("utf8"), value.encode("utf8")] for key, value in headers
],
}
)
await send({"type": "http.response.body", "body": html.encode("utf8")})
async def http_request(url, body=None):
"Performs POST if body provided, GET otherwise."
def _request():
message = urllib.request.urlopen(url, data=body)
return message.status, tuple(message.headers.raw_items()), message.read()
loop = asyncio.get_event_loop()
status_code, headers, body = await loop.run_in_executor(None, _request)
return Response(status_code, headers, body)
class Response:
"Wrapper class making HTTP responses easier to work with"
def __init__(self, status_code, headers, body):
self.status_code = status_code
self.headers = headers
self.body = body
def json(self):
return json.loads(self.text)
@property
def text(self):
# Should decode according to Content-Type, for the moment assumes utf8
return self.body.decode("utf-8")
def ensure_bytes(s):
if not isinstance(s, bytes):
return s.encode("utf-8")
else:
return s
def force_list(value):
if isinstance(value, str):
return [value]
return value
def salted_hmac(salt, value, secret):
salt = ensure_bytes(salt)
secret = ensure_bytes(secret)
key = hashlib.sha1(salt + secret).digest()
return hmac.new(key, msg=ensure_bytes(value), digestmod=hashlib.sha1)
def cookies_from_scope(scope):
cookie = dict(scope.get("headers") or {}).get(b"cookie")
if not cookie:
return {}
simple_cookie = SimpleCookie()
simple_cookie.load(cookie.decode("utf8"))
return {key: morsel.value for key, morsel in simple_cookie.items()}
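# --- illustration only, not part of the library -----------------------------------
# A minimal round-trip sketch of the Signer helper; the secret and value below
# are made up.
if __name__ == "__main__":
    signer = Signer("not-a-real-secret")
    token = signer.sign("user:123")
    print(token)
    assert signer.unsign(token) == "user:123"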
|
import requests
import telebot
import time
url = 'https://api.github.com/orgs/JBossOutreach/repos'
json_data = requests.get(url).json()
numoflists = len(json_data)
stars = 0
for i in range(numoflists):
stars += json_data[i]['stargazers_count']
bot_token = '566958721:AAEVVrN8R5f0DfDW_pdxsekdpqo3C1Vs4ao'
bot = telebot.TeleBot(token = bot_token)
@bot.message_handler(commands=['liststars'])
def send_stars(message):
bot.reply_to(message,stars)
bot.polling()
print(stars)
|
# Copyright (c) Jean-Charles Lefebvre
# SPDX-License-Identifier: MIT
from sys import version_info
if version_info < (3, 6):
raise ImportError('fitdecode requires Python 3.6+')
del version_info
from .__meta__ import (
__version__, version_info,
__title__, __fancy_title__, __description__, __url__,
__license__, __author__, __copyright__)
from .exceptions import *
from .records import *
from .reader import *
from .processors import *
from . import types
from . import profile
from . import utils
from . import processors
from . import reader
|
"""
Python Template
===============
python is a collection of python TEMPLATES.
usage:
>>> import python
>>> help(python)
"""
__version__ = "0.0.1"
from . import docstring # import docstring from the same directory as __init__.py
|
from dados import produtos, pessoas, lista
print('-=-'* 20)
print(f'{" MAP com listas ":=^40}')
print(lista)
# map() always takes a function as its first parameter.
# map() always returns an iterator.
nova_lista = map(lambda x: x * 2, lista)
# using a list comprehension instead of map()
nova_lista = [x * 2 for x in lista]
print(list(nova_lista))
print('-=-'* 20)
# Using map() with dictionaries - produtos
print(f'{" MAP com dicionários 1 ":=^40}')
def aumenta_preco(p):
p['preco'] = round(p['preco'] * 1.05, 2)
return p
novo_prd = map(aumenta_preco, produtos)
for prod in novo_prd:
print(prod)
print('-=-'* 20)
# Using map() with dictionaries - pessoas
print(f'{" MAP com dicionários 2 ":=^40}')
nomes = map(lambda p: round(p['idade'] * 1.20,2), pessoas)
for pessoa in nomes:
print(pessoa)
print('-=-'* 20)
|
from sectool.services.github import GitHub
def test_make_url():
assert GitHub(user='japsu').make_owner_url('repos') == 'https://api.github.com/users/japsu/repos'
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/type/month.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/type/month.proto',
package='google.type',
syntax='proto3',
serialized_options=_b('\n\017com.google.typeB\nMonthProtoP\001Z6google.golang.org/genproto/googleapis/type/month;month\242\002\003GTP'),
serialized_pb=_b('\n\x17google/type/month.proto\x12\x0bgoogle.type*\xb0\x01\n\x05Month\x12\x15\n\x11MONTH_UNSPECIFIED\x10\x00\x12\x0b\n\x07JANUARY\x10\x01\x12\x0c\n\x08\x46\x45\x42RUARY\x10\x02\x12\t\n\x05MARCH\x10\x03\x12\t\n\x05\x41PRIL\x10\x04\x12\x07\n\x03MAY\x10\x05\x12\x08\n\x04JUNE\x10\x06\x12\x08\n\x04JULY\x10\x07\x12\n\n\x06\x41UGUST\x10\x08\x12\r\n\tSEPTEMBER\x10\t\x12\x0b\n\x07OCTOBER\x10\n\x12\x0c\n\x08NOVEMBER\x10\x0b\x12\x0c\n\x08\x44\x45\x43\x45MBER\x10\x0c\x42]\n\x0f\x63om.google.typeB\nMonthProtoP\x01Z6google.golang.org/genproto/googleapis/type/month;month\xa2\x02\x03GTPb\x06proto3')
)
_MONTH = _descriptor.EnumDescriptor(
name='Month',
full_name='google.type.Month',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MONTH_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JANUARY', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FEBRUARY', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MARCH', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='APRIL', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MAY', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JUNE', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JULY', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUGUST', index=8, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SEPTEMBER', index=9, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OCTOBER', index=10, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOVEMBER', index=11, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DECEMBER', index=12, number=12,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=41,
serialized_end=217,
)
_sym_db.RegisterEnumDescriptor(_MONTH)
Month = enum_type_wrapper.EnumTypeWrapper(_MONTH)
MONTH_UNSPECIFIED = 0
JANUARY = 1
FEBRUARY = 2
MARCH = 3
APRIL = 4
MAY = 5
JUNE = 6
JULY = 7
AUGUST = 8
SEPTEMBER = 9
OCTOBER = 10
NOVEMBER = 11
DECEMBER = 12
DESCRIPTOR.enum_types_by_name['Month'] = _MONTH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
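# --- usage sketch (not part of the generated file) --------------------------------
# The enum wrapper exposes Name()/Value() lookups in addition to the module-level
# constants defined above.
if __name__ == '__main__':
    print(Month.Name(JANUARY))      # -> 'JANUARY'
    print(Month.Value('DECEMBER'))  # -> 12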
|
### The Bat-Code ###
import turtle as dark
import math
dark.width(8)
dark.bgcolor("black")
dark.color("#FDD017")
zoom=30
dark.left(90)
dark.penup()
dark.goto(-7*zoom,0)
dark.pendown()
for xz in range(-7*zoom,-3*zoom,1):
x=xz/zoom
absx=math.fabs(x)
y=1.5*math.sqrt((-math.fabs(absx-1))*math.fabs(3-absx)/((absx-1)*(3-absx)))*(1+math.fabs(absx-3)/(absx-3))*math.sqrt(1-(x/7)**2)+(4.5+0.75*(math.fabs(x-0.5)+math.fabs(x+0.5))-2.75*(math.fabs(x-0.75)+math.fabs(x+0.75)))*(1+math.fabs(1-absx)/(1-absx))
dark.goto(xz,y*zoom)
for xz in range(-3*zoom,-1*zoom-1,1):
x=xz/zoom
absx=math.fabs(x)
y=(2.71052+1.5-0.5*absx-1.35526*math.sqrt(4-(absx-1)**2))*math.sqrt(math.fabs(absx-1)/(absx-1))
dark.goto(xz,y*zoom)
dark.goto(-1*zoom,3*zoom)
dark.goto(int(-0.5*zoom),int(2.2*zoom))
dark.goto(int(0.5*zoom),int(2.2*zoom))
dark.goto(1*zoom,3*zoom)
for xz in range(1*zoom+1,3*zoom+1,1):
x=xz/zoom
absx=math.fabs(x)
y=(2.71052+1.5-0.5*absx-1.35526*math.sqrt(4-(absx-1)**2))*math.sqrt(math.fabs(absx-1)/(absx-1))
dark.goto(xz,y*zoom)
for xz in range(3*zoom+1,7*zoom+1,1):
x=xz/zoom
absx=math.fabs(x)
y = 1.5*math.sqrt((-math.fabs(absx-1))*math.fabs(3-absx)/((absx-1)*(3-absx)))*(1+math.fabs(absx-3)/(absx-3))*math.sqrt(1-(x/7)**2)+(4.5+0.75*(math.fabs(x-0.5)+math.fabs(x+0.5))-2.75*(math.fabs(x-0.75)+math.fabs(x+0.75)))*(1+math.fabs(1-absx)/(1-absx))
dark.goto(xz,y*zoom)
for xz in range(7*zoom,4*zoom,-1):
x=xz/zoom
absx=math.fabs(x)
y=(-3)*math.sqrt(1-(x/7)**2) * math.sqrt(math.fabs(absx-4)/(absx-4))
dark.goto(xz,y*zoom)
for xz in range(4*zoom,-4*zoom,-1):
x=xz/zoom
absx=math.fabs(x)
y=math.fabs(x/2)-0.0913722*x**2-3+math.sqrt(1-(math.fabs(absx-2)-1)**2)
dark.goto(xz,y*zoom)
for xz in range(-4*zoom-1,-7*zoom-1,-1):
x=xz/zoom
absx=math.fabs(x)
y =(-3)*math.sqrt(1-(x/7)**2) * math.sqrt(math.fabs(absx-4)/(absx-4))
dark.goto(xz,y*zoom)
dark.penup()
#dark.goto(300,300)
#Heading
dark.goto (0,-190)
dark.speed(40)
dark.color('#FFA62F')
style = ('verdana', 40, 'bold')
dark.pu()
dark.write('GOTHAM', font=style, align='center')
dark.pd()
dark.done()
|
# coding: utf-8
from dohq_teamcity.custom.base_model import TeamCityObject
# from dohq_teamcity.models.license_key import LicenseKey # noqa: F401,E501
class LicenseKeys(TeamCityObject):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'count': 'int',
'href': 'str',
'license_key': 'list[LicenseKey]'
}
attribute_map = {
'count': 'count',
'href': 'href',
'license_key': 'licenseKey'
}
def __init__(self, count=None, href=None, license_key=None, teamcity=None): # noqa: E501
"""LicenseKeys - a model defined in Swagger""" # noqa: E501
self._count = None
self._href = None
self._license_key = None
self.discriminator = None
if count is not None:
self.count = count
if href is not None:
self.href = href
if license_key is not None:
self.license_key = license_key
super(LicenseKeys, self).__init__(teamcity=teamcity)
@property
def count(self):
"""Gets the count of this LicenseKeys. # noqa: E501
:return: The count of this LicenseKeys. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this LicenseKeys.
:param count: The count of this LicenseKeys. # noqa: E501
:type: int
"""
self._count = count
@property
def href(self):
"""Gets the href of this LicenseKeys. # noqa: E501
:return: The href of this LicenseKeys. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this LicenseKeys.
:param href: The href of this LicenseKeys. # noqa: E501
:type: str
"""
self._href = href
@property
def license_key(self):
"""Gets the license_key of this LicenseKeys. # noqa: E501
:return: The license_key of this LicenseKeys. # noqa: E501
:rtype: list[LicenseKey]
"""
return self._license_key
@license_key.setter
def license_key(self, license_key):
"""Sets the license_key of this LicenseKeys.
:param license_key: The license_key of this LicenseKeys. # noqa: E501
:type: list[LicenseKey]
"""
self._license_key = license_key
|
import re
from oks import ok
from config import THE
def csv(src):
    for x,y in xy( # separate each line into independent and dependent variables
data( # convert (some) strings to floats
cols( # kill cols we are skipping
rows( # kill blanks and comments
src)))): # reading from some source
yield x,y
def fromString(txt):
for line in txt.splitlines(): yield line
def fromFile(file):
with open(file) as fs:
for line in fs:
yield line
#----------------
def rows(src, doomed=r'([\n\r\t ]|#.*)', sep=","):
# ascii file to rows of cells
for line in src:
line = re.sub(doomed, "", line)
if line:
yield line.split(sep)
def cols(src, skip="?"):
use=None
for row in src:
use = use or [n for n, x in enumerate(row) if x[0] != skip]
        assert len(row) == len(use), 'row %s lacks %s cells' % (row, len(use))
yield [row[n] for n in use]
def data(src, rules={"$": float, "<": float, ">": float, "_": int}):
"rows of cells coerced to values according to the rules seen on line 1"
changes = None
change1 = lambda x, f: x if x[0] == "?" else f(x)
for row in src:
if changes:
row = [change1(x, f) for x, f in zip(row, changes)]
else:
changes = [rules.get(x[0], str) for x in row]
yield row
def xy(src, rules=['<', '>', '!']):
"rows of values divided into independent and dependent values"
xs, ys = None, None
for row in src:
xs = xs or [n for n, z in enumerate(row) if not z[0] in rules]
ys = ys or [n for n, z in enumerate(row) if z[0] in rules]
yield [row[x] for x in xs], [row[y] for y in ys]
@ok
def XY():
"demo xy import"
for n, r in enumerate(csv(fromFile(THE.data))):
if n < 10:
print(r)
@ok
def FROMSTRING():
"REad csv data from string"
string="""
outlook, $temp, $humid, wind, !play
sunny, 85, 85, FALSE, no
sunny, 80, 90, TRUE, no
overcast, 83, 86, FALSE, yes
rainy, 70, 96, FALSE, yes
rainy, 68, 80, FALSE, yes
rainy, 65, 70, TRUE, no
overcast, 64, 65, TRUE, yes
sunny, 72, 95, FALSE, no
sunny, 69, 70, FALSE, yes
rainy, 75, 80, FALSE, yes
sunny, 75, 70, TRUE, yes
overcast, 72, 90, TRUE, yes
overcast, 81, 75, FALSE, yes
rainy, 71, 91, TRUE, no
"""
for x,y in csv(fromString(string)):
print(x," ==> ", y)
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
from flask import request, Request, jsonify
from flask import Flask
from flask import make_response
from kombu.connection import BrokerConnection
from kombu.messaging import Exchange, Queue, Consumer, Producer
import logging
import logging.handlers
import logging.config
import exampleservice_stats as stats
import threading
import subprocess
import six
import uuid
import datetime
from urlparse import urlparse
app = Flask(__name__)
start_publish = False
keystone_tenant_id='3a397e70f64e4e40b69b6266c634d9d0'
keystone_user_id='1e3ce043029547f1a61c1996d1a531a2'
rabbit_user='openstack'
rabbit_password='80608318c273f348a7c3'
rabbit_host='10.11.10.1'
rabbit_exchange='cord'
publisher_id='exampleservice_publisher'
@app.route('/monitoring/agent/exampleservice/start',methods=['POST'])
def exampleservice_start_monitoring_agent():
global start_publish, rabbit_user, rabbit_password, rabbit_host, rabbit_exchange
global keystone_tenant_id, keystone_user_id, publisher_id
try:
# To do validation of user inputs for all the functions
target = request.json['target']
logging.debug("target:%s",target)
keystone_user_id = request.json['keystone_user_id']
keystone_tenant_id = request.json['keystone_tenant_id']
url = urlparse(target)
rabbit_user = url.username
rabbit_password = url.password
rabbit_host = url.hostname
setup_rabbit_mq_channel()
start_publish = True
periodic_publish()
logging.info("Exampleservice monitoring is enabled")
return "Exampleservice monitoring is enabled"
except Exception as e:
return e.__str__()
@app.route('/monitoring/agent/exampleservice/stop',methods=['POST'])
def openstack_stop():
global start_publish
start_publish = False
logging.info ("Exampleservice monitoring is stopped")
return "Exampleservice monitoring is stopped"
producer = None
def setup_rabbit_mq_channel():
global producer
global rabbit_user, rabbit_password, rabbit_host, rabbit_exchange,publisher_id
service_exchange = Exchange(rabbit_exchange, "topic", durable=False)
# connections/channels
connection = BrokerConnection(rabbit_host, rabbit_user, rabbit_password)
logging.info('Connection to RabbitMQ server successful')
channel = connection.channel()
# produce
producer = Producer(channel, exchange=service_exchange, routing_key='notifications.info')
p = subprocess.Popen('hostname', shell=True, stdout=subprocess.PIPE)
(hostname, error) = p.communicate()
publisher_id = publisher_id + '_on_' + hostname
logging.info('publisher_id=%s',publisher_id)
def publish_exampleservice_stats(example_stats):
global producer
global keystone_tenant_id, keystone_user_id, publisher_id
for k,v in example_stats.iteritems():
msg = {'event_type': 'cord.'+k,
'message_id':six.text_type(uuid.uuid4()),
'publisher_id': publisher_id,
'timestamp':datetime.datetime.now().isoformat(),
'priority':'INFO',
'payload': {'counter_name':k,
'counter_unit':v['unit'],
'counter_volume':v['val'],
'counter_type':v['metric_type'],
'resource_id':'exampleservice',
'user_id':keystone_user_id,
'tenant_id':keystone_tenant_id
}
}
producer.publish(msg)
logging.debug('Publishing exampleservice event: %s', msg)
def periodic_publish():
global start_publish
if not start_publish:
return
stats.retrieve_status_page()
resParse = stats.parse_status_page()
logging.debug ("publish:%(data)s" % {'data':resParse})
publish_exampleservice_stats(resParse)
threading.Timer(60, periodic_publish).start()
if __name__ == "__main__":
logging.config.fileConfig('monitoring_agent.conf', disable_existing_loggers=False)
logging.info ("Exampleservice monitoring is listening on port 5004")
app.run(host="0.0.0.0",port=5004,debug=False)
|
class Unit(object):
def __init__(self, shortname, fullname):
self._shortname = shortname
self._fullname = fullname
def shortname(self):
return self._shortname
def fullname(self):
return self._fullname
def __str__(self):
return 'Unit(%s, %s)' % (self.shortname(), self.fullname())
def __repr__(self):
return '%s/%s' % (self.shortname(), self.fullname())
|
""" Interface to run an experiment on Kernel Tuner """
from copy import deepcopy
import numpy as np
import progressbar
from typing import Any, Tuple
import time as python_time
import warnings
import yappi
from metrics import units, quantity
from caching import CachedObject
record_data = ['mean_actual_num_evals']
def remove_duplicates(res: list, remove_duplicate_results: bool):
""" Removes duplicate configurations from the results """
if not remove_duplicate_results:
return res
unique_res = list()
for result in res:
if result not in unique_res:
unique_res.append(result)
return unique_res
def tune(kernel, kernel_name: str, device_name: str, strategy: dict, tune_options: dict, profiling: bool) -> Tuple[list, int]:
""" Execute a strategy, return the result, runtime and optional profiling statistics """
def tune_with_kerneltuner():
""" interface with kernel tuner to tune the kernel and return the results """
if profiling:
yappi.set_clock_type("cpu")
yappi.start()
res, env = kernel.tune(device_name=device_name, strategy=strategy['strategy'], strategy_options=strategy['options'], **tune_options)
if profiling:
yappi.stop()
return res, env
total_start_time = python_time.perf_counter()
warnings.simplefilter("ignore", UserWarning)
try:
res, _ = tune_with_kerneltuner()
except ValueError:
print("Something went wrong, trying once more.")
res, _ = tune_with_kerneltuner()
warnings.simplefilter("default", UserWarning)
total_end_time = python_time.perf_counter()
total_time_ms = round((total_end_time - total_start_time) * 1000)
# TODO when profiling, should the total_time_ms not be the time from profiling_stats? Otherwise we're timing the profiling code as well
return res, total_time_ms
def collect_results(kernel, kernel_name: str, device_name: str, strategy: dict, expected_results: dict, default_records: dict, profiling: bool,
optimization='time', remove_duplicate_results=True) -> dict:
""" Executes strategies to obtain (or retrieve from cache) the statistical data """
print(f"Running {strategy['display_name']}")
nums_of_evaluations = strategy['nums_of_evaluations']
max_num_evals = min(strategy['nums_of_evaluations'])
# TODO put the tune options in the .json in strategy_defaults?
tune_options = {
'verbose': False,
'quiet': True,
'simulation_mode': True
}
def report_multiple_attempts(rep: int, len_res: int, len_unique_res: int, strategy_repeats: int):
""" If multiple attempts are necessary, report the reason """
if len_res < 1:
print(f"({rep+1}/{strategy_repeats}) No results found, trying once more...")
elif len_unique_res < max_num_evals:
print(f"Too few unique results found ({len_unique_res} in {len_res} evaluations), trying once more...")
else:
print(f"({rep+1}/{strategy_repeats}) Only invalid results found, trying once more...")
# repeat the strategy as specified
repeated_results = list()
total_time_results = np.array([])
for rep in progressbar.progressbar(range(strategy['repeats']), redirect_stdout=True):
attempt = 0
only_invalid = True
while only_invalid or (remove_duplicate_results and len_unique_res < max_num_evals):
if attempt > 0:
report_multiple_attempts(rep, len_res, len_unique_res, strategy['repeats'])
res, total_time_ms = tune(kernel, kernel_name, device_name, strategy, tune_options, profiling)
len_res = len(res)
# check if there are only invalid configs, if so, try again
only_invalid = len_res < 1 or min(res[:20], key=lambda x: x['time'])['time'] == 1e20
unique_res = remove_duplicates(res, remove_duplicate_results)
len_unique_res = len(unique_res)
attempt += 1
# register the results
repeated_results.append(unique_res)
total_time_results = np.append(total_time_results, total_time_ms)
if len(strategy['nums_of_evaluations']) <= 0:
nums_of_evaluations = np.append(nums_of_evaluations, len_unique_res)
# gather profiling data and clear the profiler before the next round
if profiling:
stats = yappi.get_func_stats()
# stats.print_all()
path = "../experiments/profilings/V3/profile-v1.prof"
stats.save(path, type="pstat") # pylint: disable=no-member
yappi.clear_stats()
# transpose and summarise to get the results per number of evaluations
strategy['nums_of_evaluations'] = nums_of_evaluations
results_to_write = deepcopy(expected_results)
results_to_write = transpose_results(results_to_write, repeated_results, optimization, strategy)
results_to_write = summarise_results(default_records, results_to_write, total_time_results, strategy, tune_options)
return results_to_write
def transpose_results(results_to_write: dict, repeated_results: list, optimization: str, strategy: dict) -> dict:
""" Transposes the results for summarise_results to go from num. of evaluations per result to results per num. of evaluations """
nums_of_evaluations = strategy['nums_of_evaluations']
for res_index, res in enumerate(repeated_results):
for num_of_evaluations in nums_of_evaluations:
limited_res = res[:num_of_evaluations]
if optimization == 'time':
best = min(limited_res, key=lambda x: x['time'])
elif optimization == 'GFLOP/s':
best = max(limited_res, key=lambda x: x['GFLOP/s'])
time = best['time']
if time == 1e20:
error_message = f"({res_index+1}/{len(repeated_results)}) Only invalid values found after {num_of_evaluations} evaluations for strategy {strategy['display_name']}. Values: {limited_res}"
raise ValueError(error_message)
gflops = best['GFLOP/s'] if 'GFLOP/s' in best else np.nan
cumulative_execution_time = sum(x['time'] for x in limited_res if x['time'] != 1e20)
loss = best['loss'] if 'loss' in best else np.nan
noise = best['noise'] if 'noise' in best else np.nan
            # write the results to the arrays
result = results_to_write['results_per_number_of_evaluations'][str(num_of_evaluations)]
result['actual_num_evals'] = np.append(result['actual_num_evals'], len(limited_res))
result['time'] = np.append(result['time'], time)
result['GFLOP/s'] = np.append(result['GFLOP/s'], gflops)
result['loss'] = np.append(result['loss'], loss)
result['noise'] = np.append(result['noise'], noise)
result['cumulative_execution_time'] = np.append(result['cumulative_execution_time'], cumulative_execution_time)
return results_to_write
def summarise_results(default_records: dict, expected_results: dict, total_time_results: np.ndarray, strategy: dict, tune_options: dict) -> dict:
""" For every number of evaluations specified, find the best and collect details on it """
# create the results dict for this strategy
nums_of_evaluations = strategy['nums_of_evaluations']
results_to_write = deepcopy(expected_results)
    # add the total time in milliseconds
total_time_mean = np.mean(total_time_results)
total_time_std = np.std(total_time_results)
total_time_mean_per_eval = total_time_mean / nums_of_evaluations[-1]
print("Total mean time: {} ms, std {}".format(round(total_time_mean, 3), round(total_time_std, 3)))
results_to_write['total_time_mean'] = total_time_mean
results_to_write['total_time_err'] = total_time_std
for num_of_evaluations in nums_of_evaluations:
result = results_to_write['results_per_number_of_evaluations'][str(num_of_evaluations)]
# automatically summarise from default_records
# TODO look into calculation of compile and execution times
result['mean_cumulative_compile_time'] = 0
cumulative_total_time = total_time_mean_per_eval * num_of_evaluations
mean_runtimes = ['mean_cumulative_strategy_time', 'mean_cumulative_total_time']
for key in default_records.keys():
if key in mean_runtimes:
continue
if key == 'mean_cumulative_strategy_time':
                result['mean_cumulative_strategy_time'] = (cumulative_total_time
                    - result['mean_cumulative_compile_time'] - result['mean_cumulative_execution_time'])
if tune_options['simulation_mode']:
result['mean_cumulative_strategy_time'] = cumulative_total_time
elif key == 'mean_cumulative_total_time':
result['mean_cumulative_total_time'] = cumulative_total_time + result['mean_cumulative_compile_time'] + result['mean_cumulative_execution_time']
if tune_options['simulation_mode']:
result['mean_cumulative_total_time'] = cumulative_total_time
elif key == 'mean_cumulative_compile_time':
continue
elif key.startswith('mean_'):
result[key] = np.mean(result[key.replace('mean_', '')])
elif key.startswith('err_'):
result[key] = np.std(result[key.replace('err_', '')])
# summarise execution times
# TODO do this properly
# check for errors
if 'err_actual_num_evals' in default_records.keys() and result['err_actual_num_evals'] != 0:
raise ValueError('The number of actual evaluations over the runs has varied: {}'.format(result['actual_num_evals']))
if 'mean_actual_num_evals' in default_records.keys() and result['mean_actual_num_evals'] != num_of_evaluations:
print(
"The set number of evaluations ({}) is not equal to the actual number of evaluations ({}). Try increasing the fraction or maxiter in strategy options."
.format(num_of_evaluations, result['mean_actual_num_evals']))
return results_to_write
|
from django.contrib import admin
from umap.models import Result, Race, Pmodel, Log
class ResultInline(admin.TabularInline):
fields = ["rank", "bracket", "horse_num", "horse_name", "sex", "age", "jockey_name", "weight", "finish_time",
"time_lag", "odds", "prize"]
model = Result
readonly_fields = fields
ordering = ["rank", "horse_num"]
    def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request):
return False
class RaceAdmin(admin.ModelAdmin):
list_display = ("race_dt", "place_name", "round", "title", "course", "weather", "condition", "result_flg")
ordering = ["-result_flg", "-race_dt", "race_id"]
search_fields = ["race_id", "race_dt", "title", "results__horse_name", "place_name"]
inlines = [ResultInline]
class ResultAdmin(admin.ModelAdmin):
list_display = ("rank", "bracket", "horse_num", "horse_name", "sex", "age", "finish_time", "odds", "odor")
search_fields = ["key", "race__race_id", "race__title", "horse_name"]
class PmodelAdmin(admin.ModelAdmin):
list_display = ("title", "method", "columns", "recall", "precision", "roi", "updated_at")
class LogAdmin(admin.ModelAdmin):
ordering = ["-start_time", "pid"]
list_display = ("start_time", "pid", "label", "exec_time", "finish")
admin.site.register(Race, RaceAdmin)
admin.site.register(Result, ResultAdmin)
admin.site.register(Pmodel, PmodelAdmin)
admin.site.register(Log, LogAdmin)
|
import cv2
import numpy as np
import face_recognition as fr
import serial
from gpiozero import LED
from numpy import savetxt
from numpy import loadtxt
inFace = "images/"+input("Enter path to Face: ")
mode = input("Enter Mode [2 for rPI, 1 for arduino, 0 for null]: ")
outFace = loadtxt("faEnc.csv", delimiter=",")
imgTs = fr.load_image_file(inFace)
imgTs = cv2.cvtColor(imgTs, cv2.COLOR_BGR2RGB)
def faceAdd(fa):
imgTr = fr.load_image_file("images/"+fa)
imgTr = cv2.cvtColor(imgTr, cv2.COLOR_BGR2RGB)
faLocTr = fr.face_locations(imgTr)[0]
outFace = fr.face_encodings(imgTr)[0]
cv2.rectangle(imgTr, (faLocTr[3], faLocTr[0]),
(faLocTr[1], faLocTr[2]), (255, 0, 255), 2)
savetxt("faEnc.csv", outFace, delimiter=',')
# faceAdd("suraj.jpg")
faLocTs = fr.face_locations(imgTs)[0]
faEncTs = fr.face_encodings(imgTs)[0]
cv2.rectangle(imgTs, (faLocTs[3], faLocTs[0]),
(faLocTs[1], faLocTs[2]), (255, 0, 255), 2)
result = fr.compare_faces([outFace], faEncTs)
if result[0]:
result = "H" # nodemcu problem high = low
else:
result = "L" # nodemcu problem low = high
accu = fr.face_distance([outFace], faEncTs)
print(result)
print(accu[0], "face distance (lower means a closer match)")
if(mode == "1"):
port = "COM5"
Brate = 9600
a = result
se = serial.Serial(port,Brate)
if a == "H":
se.write(b"H")
elif a == "L":
se.write(b"L")
se.close()
if(mode == "2"):
l = LED(22)
if(result == "H"):
l.on()
else:
l.off()
cv2.imshow("ElonTest", imgTs)
cv2.waitKey(0)
|
# Time: O(E log V) (Dijkstra's algorithm with a binary heap)
# Space: O(V + E) for the adjacency list, heap and visited set
import collections
import heapq
from typing import List
class Solution:
    def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
        graph = collections.defaultdict(list)
for src, tgt, wt in times:
graph[src].append([wt, tgt])
heap = [[0, K]] # time it took to get to start, start,
max_time = float('-inf')
        visited = set()
        # Dijkstra's algorithm: settle nodes in order of increasing arrival time
        while heap:
            cur_time, cur_node = heapq.heappop(heap)
            if cur_node in visited:
                # skip stale heap entries for nodes that are already settled
                continue
            visited.add(cur_node)
max_time = max(max_time, cur_time)
if len(visited)==N:
return max_time
for time, nei in graph[cur_node]:
if nei not in visited:
heapq.heappush(heap, [cur_time+time, nei])
return max_time if len(visited)==N else -1
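# Minimal usage sketch (run locally, outside the LeetCode harness):
#   times = [[2, 1, 1], [2, 3, 1], [3, 4, 1]]
#   print(Solution().networkDelayTime(times, N=4, K=2))  # prints 2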
|
import music21 as mus
import numpy as np
class NoteDistribution:
'''
This class contains methods to get statistics about the
notes in the melody of a song.
'''
def __init__(self):
pass
@staticmethod
def get_note_matrix(scores):
'''
This method takes a list of score objects as input.
        The output is a tuple containing a 128x128 matrix
        of MIDI note transition probabilities and a note probability dictionary.
OUTPUT: (128x128 matrix, {Note: probability})
'''
        # Dictionaries to store counts of neighboring note pairs and of individual notes
note_pair_dictionary = {}
note_dictionary = {}
# iterate through all the scores
for score in scores:
# The actual list of notes
notes = score.flat.notes
itr = iter(range(len(notes) - 1))
# Iterate through all the notes
for i in itr:
# Make sure all the notes are ACTUALLY notes...
if not isinstance(notes[i], mus.note.Note) or not isinstance(notes[i+1], mus.note.Note):
continue
# Create pairs of notes for stochastic matrix
pair = str(notes[i].pitch.midi) + " " + str(notes[i+1].pitch.midi)
# Add them to the dictionary
if pair in note_pair_dictionary:
note_pair_dictionary[pair] += 1
else:
note_pair_dictionary[pair] = 1
if str(notes[i].pitch.midi) in note_dictionary:
note_dictionary[str(notes[i].pitch.midi)] += 1
else:
note_dictionary[str(notes[i].pitch.midi)] = 1
# Raise NoNotesFoundException if notes is empty
if len(notes) == 0:
raise NoNotesFoundException()
# Get the very last note since we miss it in the loop above
            if str(notes[len(notes) - 1].pitch.midi) in note_dictionary:
note_dictionary[str(notes[len(notes) - 1].pitch.midi)] += 1
else:
note_dictionary[str(notes[len(notes) - 1].pitch.midi)] = 1
# Check if the notes are actually being added to the dictionary
# We could have issues if a file ONLY has chords
if len(note_pair_dictionary.keys()) == 0:
raise NoNotesFoundException()
note_dictionary_probability = NoteDistribution.get_note_probabilities(note_dictionary)
stochastic_matrix = NoteDistribution.get_stochastic_note_matrix(note_pair_dictionary)
return (stochastic_matrix, note_dictionary_probability)
@staticmethod
def get_stochastic_note_matrix(distribution):
'''
        This method takes a dictionary of note-pair counts as input {"note note": count}.
        It outputs a row-normalized 128x128 matrix giving the probability of
        transitioning from each MIDI note to every other MIDI note.
        OUTPUT: 128x128 matrix
'''
# 128 total MIDI notes
size = 128
# Create matrix of 128x128, one row and column for each MIDI note
matrix = np.zeros((size, size), dtype=float)
for i in distribution.keys():
value = distribution[i]
# MIDI note values were kept as strings in the dictionary.
# Change them to ints
indices = [int(i.split(' ')[0]), int(i.split(' ')[1])]
# Insert the value at the proper position
matrix[indices[0], indices[1]] = value
for i in range(len(matrix[0, ])):
sum_count = sum(matrix[i, ])
for j in range(len(matrix[0, ])):
if sum_count != 0:
matrix[i, j] = np.divide(matrix[i, j], sum_count)
return matrix
@staticmethod
def get_note_probabilities(distribution):
'''
        This method takes a dictionary as input {Note : number of occurrences}.
        It outputs a dictionary with the probability of each note occurring.
OUTPUT: {Note : probability}
'''
total = sum(distribution.values())
for key in distribution.keys():
distribution[key] /= total
return distribution
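    # For example, get_note_probabilities({'60': 3, '62': 1}) returns
    # {'60': 0.75, '62': 0.25}.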
class NoNotesFoundException(Exception):
'''
This class contains methods to raise exception of files having no notes.
'''
def __init__(self, message="The provided MIDI or MusicXML files do not contain any notes."):
self.message = message
super().__init__(self.message)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Best-effort clean up of downloaded YouTube .srt subtitle files.
"""
import re
import sys
data = open(sys.argv[1]).read()
# Now throw away all the timestamps, which are typically of
# the form:
#
# 00:00:01,819 --> 00:00:01,829 align:start position:0%
#
data, _ = re.subn(
r'\d{2}:\d{2}:\d{2},\d{3} \-\-> \d{2}:\d{2}:\d{2},\d{3} align:start position:0%\n',
'',
data
)
# And the color changes, e.g.
#
# <c.colorE5E5E5>
#
data, _ = re.subn(r'<c\.color[0-9A-Z]{6}>', '', data)
# And any other timestamps, typically something like:
#
# </c><00:00:00,539><c>
#
# with optional closing/opening tags.
data, _ = re.subn(r'(?:</c>)?(?:<\d{2}:\d{2}:\d{2},\d{3}>)?(?:<c>)?', '', data)
# 00:00:03,500 --> 00:00:03,510
data, _ = re.subn(r'\d{2}:\d{2}:\d{2},\d{3} \-\-> \d{2}:\d{2}:\d{2},\d{3}\n', '', data)
# Separate out the different segments of the subtitle track.
# I'm not sure what these numbers mean, but they're a start!
components = [data]
while True:
i = len(components)
last_component = components.pop()
if f'\n{i}\n' in last_component:
components.extend(list(last_component.split(f'\n{i}\n')))
assert len(components) == i + 1
elif last_component.startswith('1\n'):
components.extend(list(last_component.split(f'1\n', 1)))
else:
break
# Now chuck away the first bit, which is something like "Kind: captions"
# -- I don't know what it is, but we don't need it.
if components[0].startswith('Kind: captions\n'):
components.pop(0)
# Next, remove all the trailing whitespace from each subtitle.
components = [c.rstrip() for c in components]
# This gets a lot of duplication -- try to remove it as best we can.
dedupe_components = []
for c in components:
if not c:
continue
for line in c.splitlines():
if dedupe_components and dedupe_components[-1] == line:
continue
else:
dedupe_components.append(line)
with open(sys.argv[1] + '.txt', 'w') as outfile:
outfile.write('\n'.join(dedupe_components))
print(sys.argv[1] + '.txt')
|
import os
import numpy as np
from .dsl import dsnn
|
#---------------------------------------------------------------------------
# Copyright 2014 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
"""
A Python dictionary to specify the format to
output FileMan File data entry in html format.
The key of the dictionary is FileMan File Number in string format
The Value of the dictionary is a dictionary of various key/value.
1. Key "Name": Special Fields or function to display the entry, the default
is to use the .01 field if not specified here.
2. Key "Fields": A List of fields in order for displaying a summary
of the data entry.
3. Key "Category": A Function to categorize the data entry by package,
default is None
"""
FILEMAN_FILE_OUTPUT_FORMAT = {
"8994": { # REMOTE PROCEDURE FILE # 8994
"Fields": [
{ 'id': '.01', }, # Name
{ 'id': '.02', }, # Tag
{ 'id': '.03', 'func': None}, # Routine
{ 'id': '.05', }, # Availability
],
"Category": None, # Categorize by package
},
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gym_minigrid.minigrid import *
from gym_minigrid.register import register
import numpy as np
class FourRoomsSkillsEnv(MiniGridEnv):
"""
Classic 4 rooms gridworld environment.
    Agent and goal positions can be specified; if not, they are set at random.
"""
def __init__(self, train: bool, skills: list, agent_pos=None, goal_pos=None, visualize=False):
# TODO: Remember to make sure skills are already removed according to length range
self._train = train
self._skills = skills
self._agent_default_pos = agent_pos
self._goal_default_pos = goal_pos
self.goal_pos = None
self._visualize=visualize # set True for visualization (see manual_control_skills.py)
grid_size = 19
# for large fourrooms, change to (grid_size=38, max_steps=200)
super().__init__(grid_size=grid_size, max_steps=100)
self.action_space = spaces.Discrete(len(self._skills))
self.observation_space = spaces.Box(
# [agent_x, agent_y, agent_dir, goal_x, goal_y]
low = np.array([0, 0, 0, 0, 0]),
high = np.array([grid_size - 1, grid_size - 1, 3, grid_size - 1, grid_size - 1])
)
def _gen_grid(self, width, height):
# Create the grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height - 1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width - 1, 0)
room_w = width // 2
room_h = height // 2
# For each row of rooms
for j in range(0, 2):
# For each column
for i in range(0, 2):
xL = i * room_w
yT = j * room_h
xR = xL + room_w
yB = yT + room_h
                # Right wall and door
if i + 1 < 2:
self.grid.vert_wall(xR, yT, room_h)
pos = (xR, self._rand_int(yT + 1, yB))
self.grid.set(*pos, None)
# Bottom wall and door
if j + 1 < 2:
self.grid.horz_wall(xL, yB, room_w)
pos = (self._rand_int(xL + 1, xR), yB)
self.grid.set(*pos, None)
# Randomize the player start position and orientation
if self._agent_default_pos is not None:
self.agent_pos = self._agent_default_pos
self.grid.set(*self._agent_default_pos, None)
self.agent_dir = self._rand_int(0, 4) # assuming random start direction
else:
self.place_agent()
if self._goal_default_pos is not None:
goal = Goal()
self.put_obj(goal, *self._goal_default_pos)
goal.init_pos, goal.cur_pos = self._goal_default_pos
else:
self.place_obj(Goal())
self.mission = 'Reach the goal'
def add_heat(self, search_path):
self.grid.add_heat(search_path)
def update_skills(self, skills):
self._skills = skills
self.action_space = spaces.Discrete(len(self._skills))
def set_path(self, i, j, skill):
        if (i, j) == self.goal_pos or (i == self.agent_pos[0] and j == self.agent_pos[1]):
return
self.grid.set_path(i,j,skill)
def step(self, action, skill=None):
total_reward = 0
actual_action = self._skills[action]
for primitive_action in actual_action:
obs, reward, done, info = MiniGridEnv.step(self, primitive_action)
total_reward += reward
if done:
break
if self._visualize:
return (obs, self.build_obs()), total_reward, done, info
else:
return self.build_obs(), total_reward, done, info
def reset(self):
# keep resetting MiniGrid until training_valid depending on train/test
while True:
# Current position and direction of the agent
self.agent_pos = None
self.agent_dir = None
# Generate a new random grid at the start of each episode
# To keep the same grid for each episode, call env.seed() with
# the same seed before calling env.reset()
self._gen_grid(self.width, self.height)
# These fields should be defined by _gen_grid
assert self.agent_pos is not None
assert self.agent_dir is not None
# Check that the agent doesn't overlap with an object
start_cell = self.grid.get(*self.agent_pos)
assert start_cell is None or start_cell.can_overlap()
# Item picked up, being carried, initially nothing
self.carrying = None
# Step count since episode start
self.step_count = 0
# Generate goal
self.goal_pos = self.grid.find_goal()
            if not (self.training_valid() ^ self._train):  # XNOR
obs = self.gen_obs()
break
# Return observation
if self._visualize:
return (obs, self.build_obs())
else:
return self.build_obs()
# include here to prevent circular dependency
def training_valid(self):
agent_pos = self.agent_pos
goal_pos = self.goal_pos
agent_goal = (in_room(agent_pos), in_room(goal_pos))
test_set = {
(1, 3),
(3, 1),
(2, 4),
(4, 2)
}
return agent_goal not in test_set
def build_obs(self):
return np.concatenate((self.agent_pos, [self.agent_dir], self.goal_pos), axis=0)
# include here to prevent circular dependency
def in_room(pos):
# | 4 | 1 |
# | 3 | 2 |
w, h = pos
if w < 9 and h < 9:
return 4
elif w < 9 and h > 9:
return 3
elif w > 9 and h < 9:
return 1
else:
return 2
register(
id='MiniGrid-FourRoomsSkills-v0',
entry_point='gym_minigrid.envs:FourRoomsSkillsEnv'
)
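# Minimal usage sketch (assumes the customized Grid class used here, which provides
# find_goal()/set_path()/add_heat(); skills are lists of primitive MiniGrid actions):
#   env = FourRoomsSkillsEnv(train=True, skills=[[2], [2, 2], [0], [1]])
#   obs = env.reset()
#   obs, reward, done, info = env.step(0)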
|
from tzwhere import tzwhere
import datetime
import unittest
class LocationTestCase(unittest.TestCase):
TEST_LOCATIONS = (
( 35.295953, -89.662186, 'Arlington, TN', 'America/Chicago'),
( 33.58, -85.85, 'Memphis, TN', 'America/Chicago'),
( 61.17, -150.02, 'Anchorage, AK', 'America/Anchorage'),
( 44.12, -123.22, 'Eugene, OR', 'America/Los_Angeles'),
( 42.652647, -73.756371, 'Albany, NY', 'America/New_York'),
( 55.743749, 37.6207923, 'Moscow', 'Europe/Moscow'),
( 34.104255, -118.4055591, 'Los Angeles', 'America/Los_Angeles'),
( 55.743749, 37.6207923, 'Moscow', 'Europe/Moscow'),
( 39.194991, -106.8294024, 'Aspen, Colorado', 'America/Denver'),
( 50.438114, 30.5179595, 'Kiev', 'Europe/Kiev'),
( 12.936873, 77.6909136, 'Jogupalya', 'Asia/Kolkata'),
( 38.889144, -77.0398235, 'Washington DC', 'America/New_York'),
( 59.932490, 30.3164291, 'St Petersburg', 'Europe/Moscow'),
( 50.300624, 127.559166, 'Blagoveshchensk', 'Asia/Yakutsk'),
( 42.439370, -71.0700416, 'Boston', 'America/New_York'),
( 41.84937, -87.6611995, 'Chicago', 'America/Chicago'),
( 28.626873, -81.7584514, 'Orlando', 'America/New_York'),
( 47.610615, -122.3324847, 'Seattle', 'America/Los_Angeles'),
( 51.499990, -0.1353549, 'London', 'Europe/London'),
( 51.256241, -0.8186531, 'Church Crookham', 'Europe/London'),
( 51.292215, -0.8002638, 'Fleet', 'Europe/London'),
( 48.868743, 2.3237586, 'Paris', 'Europe/Paris'),
( 22.158114, 113.5504603, 'Macau', 'Asia/Macau'),
( 56.833123, 60.6097054, 'Russia', 'Asia/Yekaterinburg'),
( 60.887496, 26.6375756, 'Salo', 'Europe/Helsinki'),
( 52.799992, -1.8524408, 'Staffordshire', 'Europe/London'),
( 5.016666, 115.0666667, 'Muara', 'Asia/Brunei'),
(-41.466666, -72.95, 'Puerto Montt seaport', 'America/Santiago'),
( 34.566666, 33.0333333, 'Akrotiri seaport', 'Asia/Nicosia'),
( 37.466666, 126.6166667, 'Inchon seaport', 'Asia/Seoul'),
( 42.8, 132.8833333, 'Nakhodka seaport', 'Asia/Vladivostok'),
( 50.26, -5.051, 'Truro', 'Europe/London'),
( 50.26, -9.051, 'Sea off Cornwall', None),
( 35.82373, -110.72144, 'Hopi Nation', 'America/Phoenix'),
( 35.751956, -110.169460, 'Deni inside Hopi Nation', 'America/Denver'),
( 68.38068073677294, -133.73396065378114, 'Upper hole in America/Yellowknife', 'America/Inuvik')
)
TEST_LOCATIONS_FORCETZ = (
( 35.295953, -89.662186, 'Arlington, TN', 'America/Chicago'),
( 33.58, -85.85, 'Memphis, TN', 'America/Chicago'),
( 61.17, -150.02, 'Anchorage, AK', 'America/Anchorage'),
( 40.7271, -73.98, 'Shore Lake Michigan', 'America/New_York'),
( 50.1536, -8.051, 'Off Cornwall', 'Europe/London'),
( 49.2698, -123.1302, 'Vancouver', 'America/Vancouver'),
( 50.26, -9.051, 'Far off Cornwall', None)
)
def _test_tzwhere(self, locations, forceTZ):
start = datetime.datetime.now()
w = tzwhere.tzwhere(forceTZ=forceTZ)
end = datetime.datetime.now()
        print('Initialized in:', end - start)
template = '{0:20s} | {1:20s} | {2:20s} | {3:2s}'
print(template.format('LOCATION', 'EXPECTED', 'COMPUTED', '=='))
for (lat, lon, loc, expected) in locations:
computed = w.tzNameAt(float(lat), float(lon), forceTZ=forceTZ)
ok = 'OK' if computed == expected else 'XX'
print(template.format(loc, str(expected), str(computed), ok))
assert computed == expected
def test_lookup(self):
self._test_tzwhere(self.TEST_LOCATIONS,forceTZ=False)
def test_forceTZ(self):
self._test_tzwhere(self.TEST_LOCATIONS_FORCETZ,forceTZ=True)
|
class Solution:
def __init__(self, string: str):
self.string = string
def toQuestion(self):
pass
def main():
givenString1 = Solution("enter_string")
print(givenString1.toQuestion())
givenString2 = Solution("enter_2nd_string")
print(givenString2.toQuestion())
if __name__ == '__main__':
main()
####################
# String
####################
# class Solution:
# def __init__(self, string:str, key:int) -> int:
# self.string = string
# self.key = key
# def caesarCipherEncryptor(self):
# newLetters = []
# newKey = self.key % 26
# alphabet = list("abcdefghijklmnopqrstuvwxyz")
# for letter in self.string:
# newLetters.append(self.getNewLetter(letter, newKey, alphabet)) # this function calls it's sub function
# return "".join(newLetters)
# def getNewLetter(self, letter, key, alphabet):
# newLetterCode = alphabet.index(letter) + self.key
# return alphabet[newLetterCode % 26]
# def main():
# givenString1 = Solution("xyz", 2)
# print(givenString1.caesarCipherEncryptor())
# # don't need to call getNewLetter sub function
# if __name__ == '__main__':
# main()
####################
# Array
####################
# class Dictionary:
# def __init__(self, dictionary):
# self.dictionary = dictionary
# def sort_ascending(self):
# return sorted(self.dictionary.items(), key=lambda x: x[1])
# def sort_descending(self):
# return sorted(self.dictionary.items(), key=lambda x: x[1], reverse=True)
# def main():
# dictionary = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
# print(Dictionary(dictionary).sort_ascending())
# print(Dictionary(dictionary).sort_descending())
# if __name__ == '__main__':
# main()
####################
# self is the only parameter
####################
# class Solution:
# def fizzbuzz(self):
# for i in range(1, 101): # 0 - 100
# if i % 3 == 0 and i % 5 == 0:
# print("Fizz Buzz")
# elif i % 3 == 0:
# print("Fizz")
# elif i % 5 == 0:
# print("Buzz")
# else:
# print(i)
# def main():
# p1 = Solution()
# p1.fizzbuzz()
# if __name__ == '__main__':
# main()
#########
# # Max Subarray with Unit Test
#########
# """ Note:
# Kadane's Algorithm
# """
# Max Subarray
# # O(n) time, O(1) space
# class Solution:
# def maxSubArray(self, nums) -> int:
# max_sum, current_sum = -float('inf'), 0
# for num in nums:
# current_sum = max(num, current_sum + num) # if nums > current_sum + nums, then current_sum + nums is the new current_sum
# max_sum = max(current_sum, max_sum) # if current_sum > max_sum, then current_sum is the new max_sum
# return max_sum
# import unittest
# class Test(unittest.TestCase):
# def test(self):
# test = Solution()
# nums = [-2,1,-3,4,-1,2,1,-5,4]
# self.assertEqual(test.maxSubArray(nums), 6)
# def test2(self):
# test = Solution()
# nums = [-2,1]
# self.assertEqual(test.maxSubArray(nums), 1)
# def main():
# unittest.main()
# if __name__ == "__main__":
# main()
|
import efficientnet.tfkeras
from tensorflow.keras.models import load_model
from tensorflow import nn
from tensorflow.keras.backend import shape
from tensorflow.keras.layers import Dropout
import segmentation_models as sm
import tf2onnx
import onnxruntime as rt
import tensorflow as tf
class FixedDropout(Dropout):
def _get_noise_shape(self, inputs):
if self.noise_shape is None:
return self.noise_shape
return tuple([shape(inputs)[i] if sh is None else sh for i, sh in enumerate(self.noise_shape)])
customObjects = {
'swish': nn.swish,
'FixedDropout': FixedDropout,
'dice_loss_plus_1binary_focal_loss': sm.losses.binary_focal_dice_loss,
'iou_score': sm.metrics.iou_score,
'f1-score': sm.metrics.f1_score
}
def main():
#semantic_model = keras.models.load_model(args.hdf5_file)
#h5file = '/home/mohan/git/backups/segmentation_models/examples/best_mode_model_filel.h5'
h5file = '/home/mohan/git/Thesis_Repos/segmentation_models/examples/ss_unet_fmodel_new_70.h5'
semantic_model = load_model(h5file, custom_objects=customObjects)
spec = (tf.TensorSpec((None, 320, 480, 3), tf.float32, name="input"),)
#output_path = semantic_model.name + ".onnx"
output_path = "seg_model_unet_ep100_new_op13" + ".onnx"
model_proto, _ = tf2onnx.convert.from_keras(semantic_model, input_signature=spec, opset=13, output_path=output_path)
output_names = [n.name for n in model_proto.graph.output]
print(output_names)
print('done')
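    # Optional sanity check of the exported model with onnxruntime (uses the
    # imported `rt`; the dummy input shape mirrors the TensorSpec above):
    #   import numpy as np
    #   sess = rt.InferenceSession(output_path)
    #   pred = sess.run(output_names, {"input": np.zeros((1, 320, 480, 3), dtype=np.float32)})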
if __name__ == "__main__":
main()
|
from PyQt5 import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from deritradeterminal.util.deribit_api import RestClient
from deritradeterminal.managers.ConfigManager import ConfigManager
class PositionsUpdateThread(QThread):
signeler = pyqtSignal(int,str,str,str,str,str,str,str)
def collectProcessData(self):
try:
config = ConfigManager.get_config()
index = 0
for x in config.tradeApis:
client = RestClient(config.tradeApis[x][0], config.tradeApis[x][1], ConfigManager.get_config().apiUrl)
cposition = client.positions()
if len(cposition) >= 1:
position = cposition[0]
direction = ""
if position['direction'] == "buy":
direction = "Long"
else:
direction = "Short"
self.signeler.emit(index, x, direction, str(position["size"]), str(format(position["sizeBtc"], '.8f')), str(format(position["averagePrice"], '.4f')), str(format(position["profitLoss"], '.8f')), str(format(position["initialMargin"], '.8f')))
else:
self.signeler.emit(index, x, "No Position", "", "", "", "", "")
index = index + 1
except Exception as e:
print(e)
def __init__(self):
QThread.__init__(self)
def run(self):
while True:
self.collectProcessData()
|
import os
import datetime
import re
import paho.mqtt.client as mqtt
import json
import yaml
from argparse import ArgumentParser
def args():
p = ArgumentParser()
p.add_argument("--secrets", '-s', action="store", type=str, help='Transfer specified file as secrets.json')
p.add_argument("--ignore-retained", '-r', action="store_true", help='Ignore retained messages.')
p.add_argument("--topic", '-t', action="store", type=str, help='MQTT topic to subscribe to.')
return p.parse_args()
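# Example invocation (the script file name is hypothetical):
#   python mqtt_listen.py --secrets secrets.yaml --topic 'home/#' --ignore-retained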
args = args()
with open(args.secrets or 'secrets.yaml') as f:
secrets = yaml.load(f, Loader=yaml.FullLoader)
topic = args.topic or '/'.join((i for i in [
secrets['MQTT_PREFIX'],
'#',
] if i))
client = mqtt.Client("mqtt-listen")
print('Username:', secrets['MQTT_USERNAME'])
client.username_pw_set(secrets['MQTT_USERNAME'], password = secrets['MQTT_PASSWORD'])
def on_message(client, userdata, message):
try:
if not (args.ignore_retained and message.retain):
print("")
print("[%s] %s:" % (message.retain and "PERMANENT" or str(datetime.datetime.now()), message.topic))
text = message.payload.decode("utf-8")
try:
print(
json.dumps(
json.loads(
text
),
sort_keys=False,
indent=4
)
)
            except ValueError:  # payload is not valid JSON
print('[TEXT]', text)
except Exception as e:
print(e)
client.on_message = on_message
print('Connecting to:', secrets['MQTT_BROKER'])
client.connect(secrets['MQTT_BROKER'])
print('Listening to:', topic)
client.subscribe(topic)
client.loop_forever()
|
"""
manhattan.py
Minor Programmeren, Programmeertheorie, Chips & Circuits
Misbaksels: Lisa Eindhoven, Sebastiaan van der Laan & Mik Schutte
These functions calculate the Manhattan distance between pairs of coordinates
and find the minimum/maximum over a list of nets. Use min_net/max_net for a
deterministic pick, use min_nets/max_nets to randomize the result in case of a tie.
"""
import random
def measure(a_coordinates, b_coordinates):
""" This function returns the manhattan distance
as measured between two sets of coordinates.
"""
x_diff = abs(a_coordinates[0] - b_coordinates[0])
y_diff = abs(a_coordinates[1] - b_coordinates[1])
z_diff = abs(a_coordinates[2] - b_coordinates[2])
return x_diff + y_diff + z_diff
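# For example, measure((0, 0, 0), (1, 2, 3)) returns 1 + 2 + 3 = 6.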
def measurement_list(nets):
""" This function returns a dict of the different manhattan
distances between all the provided nets.
"""
manh_netlist = {}
for net in nets:
manh_diff = measure(net.begin_gate.coordinate(), net.end_gate.coordinate())
manh_netlist[net.id] = manh_diff
return manh_netlist
def min_net(nets):
""" Returns net.id of shortest manhattan distance
in the provided netlist.
"""
manh_netlist = measurement_list(nets)
min_net = min(manh_netlist, key=lambda k: manh_netlist[k])
return min_net
def min_nets(nets):
""" Returns a random net.id of all the equally shortest distances
in a netlist.
"""
manh_netlist = measurement_list(nets)
min_value = min(manh_netlist.values())
min_netlist = [net_id for net_id in manh_netlist if manh_netlist[net_id] == min_value]
return random.choice(min_netlist)
def max_net(nets):
""" Returns net.id of longest manhattan distance
in the provided netlist.
"""
manh_netlist = measurement_list(nets)
max_net = max(manh_netlist, key=lambda k: manh_netlist[k])
return max_net
def max_nets(nets):
""" Returns a random net.id of all the equally long
distances in a netlist.
"""
manh_netlist = measurement_list(nets)
max_value = max(manh_netlist.values())
max_netlist = [net_id for net_id in manh_netlist if manh_netlist[net_id] == max_value]
return random.choice(max_netlist)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import logging
import json
import os
import shutil
import numpy as np
from keras.callbacks import EarlyStopping
from dde.cnn_model import build_model, train_model, reset_model, save_model, write_loss_report
from dde.input import read_input_file
from dde.molecule_tensor import get_molecule_tensor, pad_molecule_tensor
from dde.data import (prepare_data_one_fold, prepare_folded_data_from_multiple_datasets,
prepare_full_train_data_from_multiple_datasets, split_inner_val_from_train_data,
prepare_folded_data_from_file, prepare_full_train_data_from_file)
from dde.layers import MoleculeConv
from dde.uncertainty import RandomMask, EnsembleModel
class Predictor(object):
def __init__(self, input_file=None, data_file=None, save_tensors_dir=None, keep_tensors=False, out_dir=None,
normalize=False):
self.model = None
self.input_file = input_file
self.data_file = data_file
self.save_tensors_dir = save_tensors_dir
self.keep_tensors = keep_tensors
self.out_dir = out_dir
self.normalize = normalize
self.datasets = None
self.add_extra_atom_attribute = None
self.add_extra_bond_attribute = None
self.differentiate_atom_type = None
self.differentiate_bond_type = None
self.padding = None
self.padding_final_size = None
self.prediction_task = None
self.y_mean = None
self.y_std = None
if self.input_file is not None:
read_input_file(self.input_file, self)
self.get_data_from_file = False
if self.data_file is not None:
if self.data_file.endswith('.csv'):
self.get_data_from_file = True
else:
self.specify_datasets(self.data_file)
def build_model(self):
"""
This method is intended to provide a way to build default model
"""
self.model = build_model()
def load_input(self, input_file):
"""
This method is intended to provide a way to build model from an input file
"""
if input_file:
self.input_file = input_file
read_input_file(self.input_file, self)
def specify_datasets(self, datasets_file_path=None):
"""
This method specifies which datasets to use for training
"""
self.datasets = []
with open(datasets_file_path, 'r') as f_in:
for line in f_in:
line = line.strip()
if line and not line.startswith('#'):
dataset, testing_ratio = [token.strip() for token in line.split(':')]
host, db, table = [token.strip() for token in dataset.split('.')]
self.datasets.append((host, db, table, float(testing_ratio)))
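        # Each non-comment line of the datasets file is expected to look like
        # "<host>.<db>.<table>: <testing_ratio>", e.g. (hypothetical values):
        #   some_host.some_db.some_table: 0.1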
def kfcv_train(self, folds, lr_func, save_model_path, pretrained_weights=None,
batch_size=1, nb_epoch=150, patience=10, training_ratio=0.9, testing_ratio=0.0):
# prepare data for training
if self.get_data_from_file:
folded_data = prepare_folded_data_from_file(
self.data_file, folds,
add_extra_atom_attribute=self.add_extra_atom_attribute,
add_extra_bond_attribute=self.add_extra_bond_attribute,
differentiate_atom_type=self.differentiate_atom_type,
differentiate_bond_type=self.differentiate_bond_type,
padding=self.padding,
padding_final_size=self.padding_final_size,
save_tensors_dir=self.save_tensors_dir,
testing_ratio=testing_ratio)
else:
folded_data = prepare_folded_data_from_multiple_datasets(
self.datasets, folds,
add_extra_atom_attribute=self.add_extra_atom_attribute,
add_extra_bond_attribute=self.add_extra_bond_attribute,
differentiate_atom_type=self.differentiate_atom_type,
differentiate_bond_type=self.differentiate_bond_type,
padding=self.padding,
padding_final_size=self.padding_final_size,
prediction_task=self.prediction_task,
save_tensors_dir=self.save_tensors_dir)
X_test, y_test, folded_Xs, folded_ys = folded_data
losses, inner_val_losses, outer_val_losses, test_losses = [], [], [], []
train_rmses, inner_val_rmses, outer_val_rmses, test_rmses = [], [], [], []
train_maes, inner_val_maes, outer_val_maes, test_maes = [], [], [], []
for fold in range(folds):
data = prepare_data_one_fold(folded_Xs,
folded_ys,
current_fold=fold,
training_ratio=training_ratio)
# execute train_model
X_train, X_inner_val, X_outer_val, y_train, y_inner_val, y_outer_val = data
if self.normalize:
y_train, y_inner_val, y_outer_val, y_test = self.normalize_output(y_train,
y_inner_val,
y_outer_val,
y_test)
train_model_output = train_model(self.model,
X_train,
y_train,
X_inner_val,
y_inner_val,
X_test,
y_test,
X_outer_val,
y_outer_val,
nb_epoch=nb_epoch,
batch_size=batch_size,
lr_func=lr_func,
patience=patience,
load_from_disk=True if self.save_tensors_dir is not None else False,
save_model_path=save_model_path)
model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss = train_model_output
# loss and inner_val_loss each is a list
# containing loss for each epoch
losses.append(loss)
inner_val_losses.append(inner_val_loss)
outer_val_losses.append(mean_outer_val_loss)
test_losses.append(mean_test_loss)
# Calculate RMSEs and MAEs
train_rmse, train_mae = self.evaluate(X_train, y_train)
inner_val_rmse, inner_val_mae = self.evaluate(X_inner_val, y_inner_val)
outer_val_rmse, outer_val_mae = self.evaluate(X_outer_val, y_outer_val)
test_rmse, test_mae = self.evaluate(X_test, y_test)
train_rmses.append(train_rmse)
train_maes.append(train_mae)
inner_val_rmses.append(inner_val_rmse)
inner_val_maes.append(inner_val_mae)
outer_val_rmses.append(outer_val_rmse)
outer_val_maes.append(outer_val_mae)
test_rmses.append(test_rmse)
test_maes.append(test_mae)
# save model and write fold report
fpath = os.path.join(save_model_path, 'fold_{0}'.format(fold))
self.save_model(loss, inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath,
train_rmse=train_rmse, train_mae=train_mae,
inner_val_rmse=inner_val_rmse, inner_val_mae=inner_val_mae,
outer_val_rmse=outer_val_rmse, outer_val_mae=outer_val_mae,
test_rmse=test_rmse, test_mae=test_mae)
# once finish training one fold, reset the model
if pretrained_weights is not None:
self.load_parameters(pretrained_weights)
else:
self.reset_model()
# mean inner_val_loss and outer_val_loss used for selecting parameters,
# e.g., lr, epoch, attributes, etc
full_folds_mean_loss = np.mean([l[-1] for l in losses if len(l) > 0])
full_folds_mean_inner_val_loss = np.mean([l[-1] for l in inner_val_losses if len(l) > 0])
full_folds_mean_outer_val_loss = np.mean(outer_val_losses)
full_folds_mean_test_loss = np.mean(test_losses)
full_folds_mean_train_rmse = np.mean(train_rmses)
full_folds_mean_train_mae = np.mean(train_maes)
full_folds_mean_inner_val_rmse = np.mean(inner_val_rmses)
full_folds_mean_inner_val_mae = np.mean(inner_val_maes)
full_folds_mean_outer_val_rmse = np.mean(outer_val_rmses)
full_folds_mean_outer_val_mae = np.mean(outer_val_maes)
full_folds_mean_test_rmse = np.mean(test_rmses)
full_folds_mean_test_mae = np.mean(test_maes)
full_folds_loss_report_path = os.path.join(save_model_path, 'full_folds_loss_report.txt')
write_loss_report(full_folds_mean_loss,
full_folds_mean_inner_val_loss,
full_folds_mean_outer_val_loss,
full_folds_mean_test_loss,
full_folds_loss_report_path,
train_rmse=full_folds_mean_train_rmse, train_mae=full_folds_mean_train_mae,
inner_val_rmse=full_folds_mean_inner_val_rmse, inner_val_mae=full_folds_mean_inner_val_mae,
outer_val_rmse=full_folds_mean_outer_val_rmse, outer_val_mae=full_folds_mean_outer_val_mae,
test_rmse=full_folds_mean_test_rmse, test_mae=full_folds_mean_test_mae)
# Delete tensor directory
if self.save_tensors_dir is not None:
if not self.keep_tensors:
shutil.rmtree(self.save_tensors_dir)
def full_train(self, lr_func, save_model_path,
batch_size=1, nb_epoch=150, patience=10, training_ratio=0.9, testing_ratio=0.0):
# prepare data for training
if self.get_data_from_file:
split_data = prepare_full_train_data_from_file(
self.data_file,
add_extra_atom_attribute=self.add_extra_atom_attribute,
add_extra_bond_attribute=self.add_extra_bond_attribute,
differentiate_atom_type=self.differentiate_atom_type,
differentiate_bond_type=self.differentiate_bond_type,
padding=self.padding,
padding_final_size=self.padding_final_size,
save_tensors_dir=self.save_tensors_dir,
testing_ratio=testing_ratio,
meta_dir=self.out_dir
)
else:
split_data = prepare_full_train_data_from_multiple_datasets(
self.datasets,
add_extra_atom_attribute=self.add_extra_atom_attribute,
add_extra_bond_attribute=self.add_extra_bond_attribute,
differentiate_atom_type=self.differentiate_atom_type,
differentiate_bond_type=self.differentiate_bond_type,
padding=self.padding,
padding_final_size=self.padding_final_size,
prediction_task=self.prediction_task,
save_tensors_dir=self.save_tensors_dir,
meta_dir=self.out_dir
)
X_test, y_test, X_train, y_train = split_data
losses = []
inner_val_losses = []
test_losses = []
data = split_inner_val_from_train_data(X_train, y_train, training_ratio=training_ratio)
X_train, X_inner_val, y_train, y_inner_val = data
if self.normalize:
y_train, y_inner_val, y_test = self.normalize_output(y_train, y_inner_val, y_test)
# execute train_model
logging.info('\nStart full training...')
logging.info('Training data: {} points'.format(len(X_train)))
logging.info('Inner val data: {} points'.format(len(X_inner_val)))
logging.info('Test data: {} points'.format(len(X_test)))
train_model_output = train_model(self.model,
X_train,
y_train,
X_inner_val,
y_inner_val,
X_test,
y_test,
X_outer_val=None,
y_outer_val=None,
nb_epoch=nb_epoch,
batch_size=batch_size,
lr_func=lr_func,
patience=patience,
load_from_disk=True if self.save_tensors_dir is not None else False,
save_model_path=save_model_path)
model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss = train_model_output
# loss and inner_val_loss each is a list
# containing loss for each epoch
losses.append(loss)
inner_val_losses.append(inner_val_loss)
test_losses.append(mean_test_loss)
# Calculate RMSEs and MAEs
train_rmse, train_mae = self.evaluate(X_train, y_train)
inner_val_rmse, inner_val_mae = self.evaluate(X_inner_val, y_inner_val)
test_rmse, test_mae = self.evaluate(X_test, y_test)
# save model and write report
fpath = os.path.join(save_model_path, 'full_train')
self.save_model(loss, inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath,
train_rmse=train_rmse, train_mae=train_mae,
inner_val_rmse=inner_val_rmse, inner_val_mae=inner_val_mae,
test_rmse=test_rmse, test_mae=test_mae)
# Delete tensor directory
if self.save_tensors_dir is not None:
if not self.keep_tensors:
shutil.rmtree(self.save_tensors_dir)
def kfcv_batch_train(self, folds, pretrained_weights=None,
batch_size=50, nb_epoch=150, patience=10, training_ratio=0.9, testing_ratio=0.0):
# prepare data for training
if self.get_data_from_file:
folded_data = prepare_folded_data_from_file(
self.data_file, folds,
add_extra_atom_attribute=self.add_extra_atom_attribute,
add_extra_bond_attribute=self.add_extra_bond_attribute,
differentiate_atom_type=self.differentiate_atom_type,
differentiate_bond_type=self.differentiate_bond_type,
padding=self.padding,
padding_final_size=self.padding_final_size,
save_tensors_dir=self.save_tensors_dir,
testing_ratio=testing_ratio)
else:
folded_data = prepare_folded_data_from_multiple_datasets(
self.datasets, folds,
add_extra_atom_attribute=self.add_extra_atom_attribute,
add_extra_bond_attribute=self.add_extra_bond_attribute,
differentiate_atom_type=self.differentiate_atom_type,
differentiate_bond_type=self.differentiate_bond_type,
padding=self.padding,
padding_final_size=self.padding_final_size,
prediction_task=self.prediction_task,
save_tensors_dir=self.save_tensors_dir)
X_test, y_test, folded_Xs, folded_ys = folded_data
# Data might be stored as file names
if len(X_test) > 0:
if isinstance(X_test[0], str):
dims = np.load(X_test[0]).shape
X_test_new = np.zeros((len(X_test),) + dims)
for i, fname in enumerate(X_test):
X_test_new[i] = np.load(fname)
X_test = X_test_new
for fold in range(folds):
data = prepare_data_one_fold(folded_Xs,
folded_ys,
current_fold=fold,
training_ratio=training_ratio)
X_train, X_inner_val, X_outer_val, y_train, y_inner_val, y_outer_val = data
if isinstance(X_train, np.ndarray):
X_train = np.concatenate((X_train, X_inner_val))
else:
X_train.extend(X_inner_val)
if isinstance(y_train, np.ndarray):
y_train = np.concatenate((y_train, y_inner_val))
else:
y_train.extend(y_inner_val)
# Data might be stored as file names
if isinstance(X_train[0], str):
dims = np.load(X_train[0]).shape
X_train_new = np.zeros((len(X_train),) + dims)
X_outer_val_new = np.zeros((len(X_outer_val),) + dims)
for i, fname in enumerate(X_train):
X_train_new[i] = np.load(fname)
for i, fname in enumerate(X_outer_val):
X_outer_val_new[i] = np.load(fname)
X_train = X_train_new
X_outer_val = X_outer_val_new
if self.normalize:
y_train, y_outer_val, y_test = self.normalize_output(y_train, y_outer_val, y_test)
earlyStopping = EarlyStopping(monitor='val_loss', patience=patience, verbose=1, mode='auto')
history_callback = self.model.fit(np.asarray(X_train),
np.asarray(y_train),
callbacks=[earlyStopping],
nb_epoch=nb_epoch,
batch_size=batch_size,
validation_split=1.0-training_ratio)
loss_history = history_callback.history
with open(os.path.join(self.out_dir, 'history.json_fold_{0}'.format(fold)), 'w') as f_in:
json.dump(loss_history, f_in, indent=2)
# evaluate outer validation loss
outer_val_loss = self.model.evaluate(np.asarray(X_outer_val),
np.asarray(y_outer_val),
batch_size=50)
logging.info("\nOuter val loss: {0}".format(outer_val_loss))
if len(X_test) > 0:
test_loss = self.model.evaluate(np.asarray(X_test), np.asarray(y_test), batch_size=50)
logging.info("\nTest loss: {0}".format(test_loss))
# once finish training one fold, reset the model
if pretrained_weights is not None:
self.load_parameters(pretrained_weights)
else:
self.reset_model()
# Delete tensor directory
if self.save_tensors_dir is not None:
if not self.keep_tensors:
shutil.rmtree(self.save_tensors_dir)
def load_architecture(self, param_path=None):
from keras.models import model_from_json
f = open(param_path,'r').read()
self.model = model_from_json(json.loads(f),
custom_objects={"EnsembleModel":EnsembleModel,
"RandomMask":RandomMask, "MoleculeConv":MoleculeConv})
def normalize_output(self, y_train, *other_ys):
y_train = np.asarray(y_train)
self.y_mean = np.mean(y_train, axis=0)
self.y_std = np.std(y_train, axis=0)
logging.info('Mean: {}, std: {}'.format(self.y_mean, self.y_std))
y_train = (y_train - self.y_mean) / self.y_std
other_ys = tuple((np.asarray(y) - self.y_mean) / self.y_std for y in other_ys)
return (y_train,) + other_ys
def load_parameters(self, param_path=None, mean_and_std_path=None):
if param_path is not None:
self.model.load_weights(param_path)
logging.info('Loaded weights from {}'.format(param_path))
if mean_and_std_path is not None:
npzfile = np.load(mean_and_std_path)
self.y_mean = npzfile['mean']
self.y_std = npzfile['std']
logging.info('Loaded mean and std from {}'.format(mean_and_std_path))
def reset_model(self):
self.model = reset_model(self.model)
def save_model(self, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath, **kwargs):
save_model(self.model, loss, inner_val_loss, mean_outer_val_loss, mean_test_loss, fpath, **kwargs)
if self.y_mean is not None and self.y_std is not None:
np.savez(fpath + '_mean_std.npz', mean=self.y_mean, std=self.y_std)
logging.info('...saved y mean and standard deviation to {}_mean_std.npz'.format(fpath))
def predict(self, molecule=None, molecule_tensor=None, sigma=False):
"""
Predict the output given a molecule. If a tensor is specified, it
overrides the molecule argument.
"""
if molecule is None and molecule_tensor is None:
raise Exception('No molecule is specified...')
if molecule_tensor is None:
molecule_tensor = get_molecule_tensor(molecule,
self.add_extra_atom_attribute,
self.add_extra_bond_attribute,
self.differentiate_atom_type,
self.differentiate_bond_type)
if self.padding:
molecule_tensor = pad_molecule_tensor(molecule_tensor, self.padding_final_size)
molecule_tensor_array = np.array([molecule_tensor])
if sigma:
y_pred, y_sigma = self.model.predict(molecule_tensor_array, sigma=sigma)
if self.y_mean is not None and self.y_std is not None:
y_pred = y_pred * self.y_std + self.y_mean
y_sigma = y_sigma * self.y_std
if self.prediction_task == "Cp(cal/mol/K)":
return y_pred[0], y_sigma[0]
else:
return y_pred[0][0], y_sigma[0][0]
else:
y_pred = self.model.predict(molecule_tensor_array)
if self.y_mean is not None and self.y_std is not None:
y_pred = y_pred * self.y_std + self.y_mean
if self.prediction_task == "Cp(cal/mol/K)":
return y_pred[0]
else:
return y_pred[0][0]
def evaluate(self, X, y):
"""
Evaluate RMSE and MAE given a list or array of file names or tensors
and a list or array of outputs.
"""
y_pred = []
for x in X:
if self.save_tensors_dir is not None:
x = np.load(x)
y_pred.append(self.predict(molecule_tensor=x))
y_pred = np.array(y_pred).flatten()
y = np.asarray(y)
if self.y_mean is not None and self.y_std is not None:
y = y * self.y_std + self.y_mean
y = y.flatten()
diff = y - y_pred
rmse = np.sqrt(np.dot(diff.T, diff) / len(y))
mae = np.sum(np.abs(diff)) / len(y)
return rmse, mae
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-30 11:38
from __future__ import unicode_literals
import consent.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('consent', '0020_auto_20170530_1615'),
]
operations = [
migrations.AlterField(
model_name='educationdetail',
name='resume',
field=models.FileField(blank=True, null=True, storage=consent.models.OverwriteStorage(), upload_to=consent.models.resume_file_path),
),
]
|
import unittest
import math
import io
from pathlib import Path
import vcfpy
from cerebra.germline_filter import write_filtered_vcf
from cerebra.utils import GenomePosition, GenomeIntervalTree
class GermlineFilterTestCase(unittest.TestCase):
@classmethod
def setUpClass(self):
self.data_path = Path(__file__).parent / "data" / "test_germline_filter"
def test_germline_filter(self):
filtered_cell_vcf_paths = self.data_path.glob("GF_*.vcf")
for filtered_cell_vcf_path in filtered_cell_vcf_paths:
cell_name = filtered_cell_vcf_path.stem.replace("GF_", '')
cell_vcf_path = self.data_path / (cell_name + ".vcf")
germline_vcf_paths = self.data_path.glob(cell_name + "_GL*.vcf")
# Create germline genome tree
germline_vcf_records = [list(vcfpy.Reader.from_path(path)) for path in germline_vcf_paths]
# Flatten records
germline_vcf_records = sum(germline_vcf_records, [])
germline_genome_tree = GenomeIntervalTree(GenomePosition.from_vcf_record, germline_vcf_records)
# Test writing VCF
with io.StringIO() as out_file:
with open(cell_vcf_path, mode='r') as in_file:
write_filtered_vcf(in_file, germline_genome_tree, out_file)
# Reset the buffer's cursor position
out_file.seek(0)
with open(filtered_cell_vcf_path, mode='r') as expected_file:
expected_reader = vcfpy.Reader.from_stream(expected_file)
out_reader = vcfpy.Reader.from_stream(out_file)
expected_records, out_records = list(expected_reader), list(out_reader)
self.assertEqual(expected_records, out_records)
if __name__ == "__main__":
unittest.main()
|
import hashlib
import threading
readlock = threading.Lock()
writelock = threading.Lock()
def get_hash(data):
    if not isinstance(data, bytes):
        # hashlib only accepts bytes, so encode str input transparently
        data = data.encode('utf-8')
gfg = hashlib.sha3_256()
gfg.update(data)
return gfg.hexdigest()
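# For example, get_hash('abc') and get_hash(b'abc') both return the same
# 64-character SHA3-256 hex digest.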
empty_hash = get_hash('')
class BlockList():
def __init__(self) -> None:
self.blocks = []
self.block_map = {}
self.last_hash = empty_hash
def add(self, value, hash, sign_hash):
readlock.acquire()
self.blocks.append({"hash": hash, "value": value, "sign": sign_hash})
self.block_map[hash] = {
"messages": value, "sign_hash": sign_hash, "prev_hash": self.last_hash}
self.last_hash = hash
readlock.release()
def all_blocks(self):
return list(self.block_map.keys())
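# Minimal usage sketch (hash/sign values are illustrative):
#   chain = BlockList()
#   chain.add(value=["hello"], hash=get_hash("hello"), sign_hash=get_hash("signature"))
#   print(chain.all_blocks())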
|
# coding: utf-8
"""
NRF NFDiscovery Service
NRF NFDiscovery Service. © 2020, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved. # noqa: E501
The version of the OpenAPI document: 1.1.0.alpha-4
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from com.h21lab.TS29510_Nnrf_NFDiscovery.model.stored_search__document_api import StoredSearchDocumentApi # noqa: E501
from openapi_client.rest import ApiException
class TestStoredSearchDocumentApi(unittest.TestCase):
"""StoredSearchDocumentApi unit test stubs"""
def setUp(self):
        self.api = StoredSearchDocumentApi()  # noqa: E501
def tearDown(self):
pass
def test_retrieve_stored_search(self):
"""Test case for retrieve_stored_search
"""
pass
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from pandas import Categorical, Series
import pandas._testing as tm
class TestUnique:
def test_unique_data_ownership(self):
# it works! GH#1807
Series(Series(["a", "c", "b"]).unique()).sort_values()
def test_unique(self):
# GH#714 also, dtype=float
ser = Series([1.2345] * 100)
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
# explicit f4 dtype
ser = Series([1.2345] * 100, dtype="f4")
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
def test_unique_nan_object_dtype(self):
# NAs in object arrays GH#714
ser = Series(["foo"] * 100, dtype="O")
ser[::2] = np.nan
result = ser.unique()
assert len(result) == 2
def test_unique_none(self):
# decision about None
ser = Series([1, 2, 3, None, None, None], dtype=object)
result = ser.unique()
expected = np.array([1, 2, 3, None], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unique_categorical(self):
# GH#18051
cat = Categorical([])
ser = Series(cat)
result = ser.unique()
tm.assert_categorical_equal(result, cat)
cat = Categorical([np.nan])
ser = Series(cat)
result = ser.unique()
tm.assert_categorical_equal(result, cat)
|
#!/bin/python3
# from matplotlib import colors, cm
# from matplotlib.ticker import PercentFormatter
# import matplotlib.pyplot as plt
from scipy import stats
from subprocess import Popen, PIPE
import numpy as np
import pathlib, json, glob, os
import random
import math
## Commented out counters are zero at the 99th percentile in both fast and slow cases.
##
## TODO: figure out how to handle MSR_PEBS_FRONTEND.EVTSEL events (which have event=A6 umask=01)
## TODO: handle MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* events (with event=CD umask=01)
counters = {
"counts": "--event=00 --umask=03",
"CPU_CLK_UNHALTED.REF_TSC": "--event=00 --umask=03",
"LD_BLOCKS.STORE_FORWARD": "--event=03 --umask=02",
"LD_BLOCKS.NO_SR": "--event=03 --umask=08",
"LD_BLOCKS_PARTIAL.ADDRESS_ALIAS": "--event=07 --umask=01",
"DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK": "--event=08 --umask=01",
"DTLB_LOAD_MISSES.WALK_COMPLETED_4K": "--event=08 --umask=02",
"DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M": "--event=08 --umask=04",
"DTLB_LOAD_MISSES.WALK_COMPLETED_1G": "--event=08 --umask=08",
"DTLB_LOAD_MISSES.WALK_COMPLETED": "--event=08 --umask=0E",
"DTLB_LOAD_MISSES.WALK_PENDING": "--event=08 --umask=10",
"DTLB_LOAD_MISSES.WALK_ACTIVE": "--event=08 --umask=10 --counter-mask=1",
"DTLB_LOAD_MISSES.STLB_HIT": "--event=08 --umask=20",
"INT_MISC.RECOVERY_CYCLES": "--event=0D --umask=01",
"INT_MISC.RECOVERY_CYCLES_ANY": "--event=0D --umask=01 --anyt",
"INT_MISC.CLEAR_RESTEER_CYCLES": "--event=0D --umask=80",
"UOPS_ISSUED.ANY": "--event=0E --umask=01",
"UOPS_ISSUED.STALL_CYCLES": "--event=0E --umask=01 --counter-mask=1 --invert",
"UOPS_ISSUED.VECTOR_WIDTH_MISMATCH": "--event=0E --umask=02",
"UOPS_ISSUED.SLOW_LEA": "--event=0E --umask=20",
"ARITH.DIVIDER_ACTIVE": "--event=14 --umask=01",
"L2_RQSTS.DEMAND_DATA_RD_MISS": "--event=24 --umask=21",
"L2_RQSTS.RFO_MISS": "--event=24 --umask=22",
"L2_RQSTS.CODE_RD_MISS": "--event=24 --umask=24",
"L2_RQSTS.ALL_DEMAND_MISS": "--event=24 --umask=27",
"L2_RQSTS.PF_MISS": "--event=24 --umask=38",
"L2_RQSTS.MISS": "--event=24 --umask=3F",
"L2_RQSTS.DEMAND_DATA_RD_HIT": "--event=24 --umask=41",
"L2_RQSTS.RFO_HIT": "--event=24 --umask=42",
"L2_RQSTS.CODE_RD_HIT": "--event=24 --umask=44",
"L2_RQSTS.PF_HIT": "--event=24 --umask=D8",
"L2_RQSTS.ALL_DEMAND_DATA_RD": "--event=24 --umask=E1",
"L2_RQSTS.ALL_RFO": "--event=24 --umask=E2",
"L2_RQSTS.ALL_CODE_RD": "--event=24 --umask=E4",
"L2_RQSTS.ALL_DEMAND_REFERENCES": "--event=24 --umask=E7",
"L2_RQSTS.ALL_PF": "--event=24 --umask=E8",
"L2_RQSTS.REFERENCES": "--event=24 --umask=FF",
"CORE_POWER.LVL0_TURBO_LICENSE": "--event=28 --umask=07",
"CORE_POWER.LVL1_TURBO_LICENSE": "--event=28 --umask=18",
"CORE_POWER.LVL2_TURBO_LICENSE": "--event=28 --umask=20",
"CORE_POWER.THROTTLE": "--event=28 --umask=40",
"LONGEST_LAT_CACHE.MISS": "--event=2E --umask=41",
"LONGEST_LAT_CACHE.REFERENCE": "--event=2E --umask=4F",
"CPU_CLK_UNHALTED.THREAD_P": "--event=3C --umask=00",
"CPU_CLK_UNHALTED.THREAD_P_ANY": "--event=3C --umask=00 --anyt",
"CPU_CLK_UNHALTED.RING0_TRANS": "--event=3C --umask=00 --counter-mask=1 --edge-detect",
"CPU_CLK_UNHALTED.REF_XCLK": "--event=3C --umask=01",
"CPU_CLK_UNHALTED.REF_XCLK_ANY": "--event=3C --umask=01 --anyt",
"CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE": "--event=3C --umask=02",
"L1D_PEND_MISS.PENDING": "--event=48 --umask=01",
"L1D_PEND_MISS.PENDING_CYCLES": "--event=48 --umask=01 --counter-mask=1",
"L1D_PEND_MISS.PENDING_CYCLES_ANY": "--event=48 --umask=01 --counter-mask=1 --anyt",
"L1D_PEND_MISS.FB_FULL": "--event=48 --umask=02",
"DTLB_STORE_MISSES.MISS_CAUSES_A_WALK": "--event=49 --umask=01",
"DTLB_STORE_MISSES.WALK_COMPLETED_4K": "--event=49 --umask=02",
"DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M": "--event=49 --umask=04",
"DTLB_STORE_MISSES.WALK_COMPLETED_1G": "--event=49 --umask=08",
"DTLB_STORE_MISSES.WALK_COMPLETED": "--event=49 --umask=0e",
"DTLB_STORE_MISSES.WALK_PENDING": "--event=49 --umask=10",
"DTLB_STORE_MISSES.WALK_ACTIVE": "--event=49 --umask=10 --counter-mask=1",
"DTLB_STORE_MISSES.STLB_HIT": "--event=49 --umask=20",
"LOAD_HIT_PRE.SW_PF": "--event=4C --umask=01",
"EPT.WALK_PENDING": "--event=4F --umask=10",
"L1D.REPLACEMENT": "--event=51 --umask=01",
"TX_MEM.ABORT_CONFLICT": "--event=54 --umask=01",
"TX_MEM.ABORT_CAPACITY": "--event=54 --umask=02",
"TX_MEM.ABORT_CAPACITY": "--event=54 --umask=04",
"TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY": "--event=54 --umask=08",
"TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH": "--event=54 --umask=10",
"TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT": "--event=54 --umask=20",
"TX_MEM.HLE_ELISION_BUFFER_FULL": "--event=54 --umask=40",
"TX_EXEC.MISC1": "--event=5D --umask=01",
"TX_EXEC.MISC2": "--event=5D --umask=02",
"TX_EXEC.MISC3": "--event=5D --umask=04",
"TX_EXEC.MISC4": "--event=5D --umask=08",
"TX_EXEC.MISC5": "--event=5D --umask=10",
"RS_EVENTS.EMPTY_CYCLES": "--event=5E --umask=01",
"RS_EVENTS.EMPTY_END": "--event=5E --umask=01 --counter-mask=1 --edge-detect --invert",
"OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD": "--event=60 --umask=01",
"OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD": "--event=60 --umask=01 --counter-mask=1",
"OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6": "--event=60 --umask=01 --counter-mask=6",
"OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD": "--event=60 --umask=02 --counter-mask=1",
"OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO": "--event=60 --umask=04 --counter-mask=1",
"OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD": "--event=60 --umask=08",
"OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD": "--event=60 --umask=08 --counter-mask=1",
"OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD": "--event=60 --umask=10",
"OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD": "--event=60 --umask=10 --counter-mask=1",
"OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6": "--event=60 --umask=10 --counter-mask=6",
"IDQ.MITE_UOPS": "--event=79 --umask=04",
"IDQ.MITE_CYCLES": "--event=79 --umask=04 --counter-mask=1",
"IDQ.DSB_UOPS": "--event=79 --umask=08",
"IDQ.DSB_CYCLES": "--event=79 --umask=08 --counter-mask=1",
"IDQ.MS_DSB_CYCLES": "--event=79 --umask=10",
"IDQ.ALL_DSB_CYCLES_4_UOPS": "--event=79 --umask=18 --counter-mask=4",
"IDQ.ALL_DSB_CYCLES_ANY_UOPS": "--event=79 --umask=18 --counter-mask=1",
"IDQ.MS_MITE_UOPS": "--event=79 --umask=20",
"IDQ.ALL_MITE_CYCLES_4_UOPS": "--event=79 --umask=24 --counter-mask=4",
"IDQ.ALL_MITE_CYCLES_ANY_UOPS": "--event=79 --umask=24 --counter-mask=1",
"IDQ.MS_CYCLES": "--event=79 --umask=30 --counter-mask=1",
"IDQ.MS_SWITCHES": "--event=79 --umask=30 --counter-mask=1 --edge-detect",
"IDQ.MS_UOPS": "--event=79 --umask=30",
"ICACHE_16B.IFDATA_STALL": "--event=80 --umask=04",
"ICACHE_64B.IFTAG_HIT": "--event=83 --umask=01",
"ICACHE_64B.IFTAG_MISS": "--event=83 --umask=02",
"ICACHE_64B.IFTAG_STALL": "--event=83 --umask=04",
"ITLB_MISSES.MISS_CAUSES_A_WALK": "--event=85 --umask=01",
"ITLB_MISSES.WALK_COMPLETED_4K": "--event=85 --umask=02",
"ITLB_MISSES.WALK_COMPLETED_2M_4M": "--event=85 --umask=04",
"ITLB_MISSES.WALK_COMPLETED_1G": "--event=85 --umask=08",
"ITLB_MISSES.WALK_COMPLETED": "--event=85 --umask=0E",
"ITLB_MISSES.WALK_PENDING": "--event=85 --umask=10",
"ITLB_MISSES.WALK_ACTIVE": "--event=85 --umask=10 --counter-mask=1",
"ITLB_MISSES.STLB_HIT": "--event=85 --umask=20",
"ILD_STALL.LCP": "--event=87 --umask=01",
"IDQ_UOPS_NOT_DELIVERED.CORE": "--event=9C --umask=01",
"IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE": "--event=9C --umask=01 --counter-mask=4",
"IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE": "--event=9C --umask=01 --counter-mask=3",
"IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE": "--event=9C --umask=01 --counter-mask=2",
"IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE": "--event=9C --umask=01 --counter-mask=1",
"IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK": "--event=9C --umask=01 --counter-mask=1 --invert",
"UOPS_DISPATCHED_PORT.PORT_0": "--event=A1 --umask=01",
"UOPS_DISPATCHED_PORT.PORT_1": "--event=A1 --umask=02",
"UOPS_DISPATCHED_PORT.PORT_2": "--event=A1 --umask=04",
"UOPS_DISPATCHED_PORT.PORT_3": "--event=A1 --umask=08",
"UOPS_DISPATCHED_PORT.PORT_4": "--event=A1 --umask=10",
"UOPS_DISPATCHED_PORT.PORT_5": "--event=A1 --umask=20",
"UOPS_DISPATCHED_PORT.PORT_6": "--event=A1 --umask=40",
"UOPS_DISPATCHED_PORT.PORT_7": "--event=A1 --umask=80",
"RESOURCE_STALLS.ANY": "--event=A2 --umask=01",
"RESOURCE_STALLS.SB": "--event=A2 --umask=08",
"CYCLE_ACTIVITY.CYCLES_L2_MISS": "--event=A3 --umask=01 --counter-mask=1",
"CYCLE_ACTIVITY.CYCLES_L3_MISS": "--event=A3 --umask=02 --counter-mask=2",
"CYCLE_ACTIVITY.STALLS_TOTAL": "--event=A3 --umask=04 --counter-mask=4",
"CYCLE_ACTIVITY.STALLS_L2_MISS": "--event=A3 --umask=05 --counter-mask=5",
"CYCLE_ACTIVITY.STALLS_L3_MISS": "--event=A3 --umask=06 --counter-mask=6",
"CYCLE_ACTIVITY.CYCLES_L1D_MISS": "--event=A3 --umask=08 --counter-mask=8",
"CYCLE_ACTIVITY.STALLS_L1D_MISS": "--event=A3 --umask=0C --counter-mask=12",
"CYCLE_ACTIVITY.CYCLES_MEM_ANY": "--event=A3 --umask=10 --counter-mask=16",
"CYCLE_ACTIVITY.STALLS_MEM_ANY": "--event=A3 --umask=14 --counter-mask=20",
"EXE_ACTIVITY.EXE_BOUND_0_PORTS": "--event=A6 --umask=01",
"EXE_ACTIVITY.1_PORTS_UTIL": "--event=A6 --umask=02",
"EXE_ACTIVITY.2_PORTS_UTIL": "--event=A6 --umask=04",
"EXE_ACTIVITY.3_PORTS_UTIL": "--event=A6 --umask=08",
"EXE_ACTIVITY.4_PORTS_UTIL": "--event=A6 --umask=10",
"EXE_ACTIVITY.BOUND_ON_STORES": "--event=A6 --umask=40",
"LSD.UOPS": "--event=A8 --umask=01",
"LSD.CYCLES_ACTIVE": "--event=A8 --umask=01 --counter-mask=1",
"LSD.CYCLES_4_UOPS": "--event=A8 --umask=01 --counter-mask=4",
"DSB2MITE_SWITCHES.PENALTY_CYCLES": "--event=AB --umask=02",
"ITLB.ITLB_FLUSH": "--event=AE --umask=01",
"OFFCORE_REQUESTS.DEMAND_DATA_RD": "--event=B0 --umask=01",
"OFFCORE_REQUESTS.DEMAND_CODE_RD": "--event=B0 --umask=02",
"OFFCORE_REQUESTS.DEMAND_RFO": "--event=B0 --umask=04",
"OFFCORE_REQUESTS.ALL_DATA_RD": "--event=B0 --umask=08",
"OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD": "--event=B0 --umask=10",
"OFFCORE_REQUESTS.ALL_REQUESTS": "--event=B0 --umask=80",
"UOPS_EXECUTED.THREAD": "--event=B1 --umask=01",
"UOPS_EXECUTED.STALL_CYCLES": "--event=B1 --umask=01 --counter-mask=1 --invert",
"UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC": "--event=B1 --umask=01 --counter-mask=1",
"UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC": "--event=B1 --umask=01 --counter-mask=2",
"UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC": "--event=B1 --umask=01 --counter-mask=3",
"UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC": "--event=B1 --umask=01 --counter-mask=4",
"UOPS_EXECUTED.CORE": "--event=B1 --umask=02",
"UOPS_EXECUTED.CORE_CYCLES_GE_1": "--event=B1 --umask=02 --counter-mask=1",
"UOPS_EXECUTED.CORE_CYCLES_GE_2": "--event=B1 --umask=02 --counter-mask=2",
"UOPS_EXECUTED.CORE_CYCLES_GE_3": "--event=B1 --umask=02 --counter-mask=3",
"UOPS_EXECUTED.CORE_CYCLES_GE_4": "--event=B1 --umask=02 --counter-mask=4",
"UOPS_EXECUTED.CORE_CYCLES_NONE": "--event=B1 --umask=02 --counter-mask=1 --invert",
"UOPS_EXECUTED.X87": "--event=B1 --umask=10",
"OFFCORE_REQUESTS_BUFFER.SQ_FULL": "--event=B2 --umask=01",
"TLB_FLUSH.DTLB_THREAD": "--event=BD --umask=01",
"TLB_FLUSH.STLB_ANY": "--event=BD --umask=20",
"INST_RETIRED.ANY_P": "--event=C0 --umask=00",
"INST_RETIRED.PREC_DIST": "--event=C0 --umask=01",
"OTHER_ASSISTS.ANY": "--event=C1 --umask=3F",
"UOPS_RETIRED.STALL_CYCLES": "--event=C2 --umask=01 --counter-mask=1 --invert",
"UOPS_RETIRED.TOTAL_CYCLES": "--event=C2 --umask=01 --counter-mask=10 --invert",
"UOPS_RETIRED.RETIRE_SLOTS": "--event=C2 --umask=02",
"MACHINE_CLEARS.COUNT": "--event=C3 --umask=01 --counter-mask=1 --edge-detect",
"MACHINE_CLEARS.MEMORY_ORDERING": "--event=C3 --umask=02",
"MACHINE_CLEARS.SMC": "--event=C3 --umask=04",
"BR_INST_RETIRED.ALL_BRANCHES": "--event=C4 --umask=00",
"BR_INST_RETIRED.CONDITIONAL": "--event=C4 --umask=01",
"BR_INST_RETIRED.NEAR_CALL": "--event=C4 --umask=02",
"BR_INST_RETIRED.NEAR_RETURN": "--event=C4 --umask=08",
"BR_INST_RETIRED.NOT_TAKEN": "--event=C4 --umask=10",
"BR_INST_RETIRED.NEAR_TAKEN": "--event=C4 --umask=20",
"BR_INST_RETIRED.FAR_BRANCH": "--event=C4 --umask=40",
"BR_MISP_RETIRED.ALL_BRANCHES": "--event=C5 --umask=00",
"BR_MISP_RETIRED.CONDITIONAL": "--event=C5 --umask=01",
"BR_MISP_RETIRED.NEAR_CALL": "--event=C5 --umask=02",
"BR_MISP_RETIRED.NEAR_TAKEN": "--event=C5 --umask=20",
"FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": "--event=C7 --umask=01",
"FP_ARITH_INST_RETIRED.SCALAR_SINGLE": "--event=C7 --umask=02",
"FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": "--event=C7 --umask=04",
"FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": "--event=C7 --umask=08",
"FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": "--event=C7 --umask=10",
"FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": "--event=C7 --umask=20",
"FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": "--event=C7 --umask=40",
"FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": "--event=C7 --umask=80",
"HLE_RETIRED.START": "--event=C8 --umask=01",
"HLE_RETIRED.COMMIT": "--event=C8 --umask=02",
"HLE_RETIRED.ABORTED": "--event=C8 --umask=04",
"HLE_RETIRED.ABORTED_MEM": "--event=C8 --umask=08",
"HLE_RETIRED.ABORTED_TIMER": "--event=C8 --umask=10",
"HLE_RETIRED.ABORTED_UNFRIENDLY": "--event=C8 --umask=20",
"HLE_RETIRED.ABORTED_MEMTYPE": "--event=C8 --umask=40",
"HLE_RETIRED.ABORTED_EVENTS": "--event=C8 --umask=80",
"RTM_RETIRED.START": "--event=C9 --umask=01",
"RTM_RETIRED.COMMIT": "--event=C9 --umask=02",
"RTM_RETIRED.ABORTED": "--event=C9 --umask=04",
"RTM_RETIRED.ABORTED_MEM": "--event=C9 --umask=08",
"RTM_RETIRED.ABORTED_TIMER": "--event=C9 --umask=10",
"RTM_RETIRED.ABORTED_UNFRIENDLY": "--event=C9 --umask=20",
"RTM_RETIRED.ABORTED_MEMTYPE": "--event=C9 --umask=40",
"RTM_RETIRED.ABORTED_EVENTS": "--event=C9 --umask=80",
"FP_ASSIST.ANY": "--event=CA --umask=1E --counter-mask=1",
"HW_INTERRUPTS.RECEIVED": "--event=CB --umask=01",
"ROB_MISC_EVENTS.LBR_INSERTS": "--event=CC --umask=20",
"MEM_INST_RETIRED.STLB_MISS_LOADS": "--event=D0 --umask=11",
"MEM_INST_RETIRED.STLB_MISS_STORES": "--event=D0 --umask=12",
"MEM_INST_RETIRED.LOCK_LOADS": "--event=D0 --umask=21",
"MEM_INST_RETIRED.SPLIT_LOADS": "--event=D0 --umask=41",
"MEM_INST_RETIRED.SPLIT_STORES": "--event=D0 --umask=42",
"MEM_INST_RETIRED.ALL_LOADS": "--event=D0 --umask=81",
"MEM_INST_RETIRED.ALL_STORES": "--event=D0 --umask=82",
"MEM_LOAD_RETIRED.L1_HIT": "--event=D1 --umask=01",
"MEM_LOAD_RETIRED.L2_HIT": "--event=D1 --umask=02",
"MEM_LOAD_RETIRED.L3_HIT": "--event=D1 --umask=04",
"MEM_LOAD_RETIRED.L1_MISS": "--event=D1 --umask=08",
"MEM_LOAD_RETIRED.L2_MISS": "--event=D1 --umask=10",
"MEM_LOAD_RETIRED.L3_MISS": "--event=D1 --umask=20",
"MEM_LOAD_RETIRED.FB_HIT": "--event=D1 --umask=40",
"MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS": "--event=D2 --umask=01",
"MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT": "--event=D2 --umask=02",
"MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM": "--event=D2 --umask=04",
"MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE": "--event=D2 --umask=08",
"MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM": "--event=D3 --umask=01",
"MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM": "--event=D3 --umask=02",
"MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM": "--event=D3 --umask=04",
"MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD": "--event=D3 --umask=08",
"MEM_LOAD_MISC_RETIRED.UC": "--event=D4 --umask=04",
"BACLEARS.ANY": "--event=E6 --umask=01",
"L2_TRANS.L2_WB": "--event=F0 --umask=40",
"L2_LINES_IN.ALL": "--event=F1 --umask=1F",
"L2_LINES_OUT.SILENT": "--event=F2 --umask=01",
"L2_LINES_OUT.NON_SILENT": "--event=F2 --umask=02",
"L2_LINES_OUT.USELESS_PREF": "--event=F2 --umask=04",
"SQ_MISC.SPLIT_LOCK": "--event=F4 --umask=10",
"IDI_MISC.WB_UPGRADE": "--event=FE --umask=02",
"IDI_MISC.WB_DOWNGRADE": "--event=FE --umask=04",
}
print(" counter normal slow")
for name, cmd in counters.items():
p = Popen(["target/release/seismograph", "--stdout"] + cmd.split(' '), stdout=PIPE, stderr=PIPE)
output = p.stdout.read()
if len(output) == 0:
print("{} <error>".format(name.rjust(50)))
continue
data = json.loads(output)
times = [[], []]
uops = [[], []]
values = [[], []]
for p in data["data"]:
if p["average_cycles"] < 350:
i = 0
else:
i = 1
times[i].append(float(p["average_cycles"])/2.4 + random.random() - 0.5)
uops[i].append(p["uops_retired"])
values[i].append(p["counter"])
if name == "counts":
print("median = ", np.median(times[0] + times[1]) * 2.4)
print("p90 = ", np.percentile(times[0] + times[1], 90) * 2.4)
print("p99 = ", np.percentile(times[0] + times[1], 99) * 2.4)
print("p999 = ", np.percentile(times[0] + times[1], 99.9) * 2.4)
print("{} {:^13} {:^13}".format(name.rjust(50), len(values[0]), len(values[1])))
elif int(np.percentile(values[0], 99)) == 0 and int(np.percentile(values[1], 99)) == 0:
print("{}".format(name.rjust(50)))
else:
z = abs(np.mean(values[0]) - np.mean(values[1])) / math.sqrt((np.std(values[0])/math.sqrt(len(values[0])))**2 + (np.std(values[1])/math.sqrt(len(values[1])))**2)
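        # z < 2.33 is roughly the one-sided 1% significance threshold; rows below it are printed dimmed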
if z < 2.33:
print("{} \033[1;30m{:>6}/{:<6} {:>6}/{:<6} [z={:.1f}] \033[0m".format(name.rjust(50), int(np.percentile(values[0], 25)), int(np.percentile(values[0], 75)), int(np.percentile(values[1], 25)), int(np.percentile(values[1], 75)), z))
else:
print("{} {:>6}/{:<6} {:>6}/{:<6} [z={:.1f}]".format(name.rjust(50), int(np.percentile(values[0], 25)), int(np.percentile(values[0], 75)), int(np.percentile(values[1], 25)), int(np.percentile(values[1], 75)), z))
# print("{} {:>8} {:>8}".format(name.rjust(50), int(np.median(values[0])), int(np.median(values[1]))))
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^pool/(?P<pk>[0-9a-zA-Z\/]+)/$', views.UserRedirectView.as_view(), name='pool'),
    url(r'^pool/(?P<pk>[\d\w_]+)$', views.pool_fix, name='pool_fix'),  # allow digits, letters and underscores only
]
|
#!/usr/bin/env python3
if __name__ != "__main__":
    exit("This application is not meant to be imported as a library!")
import pygame
import tkinter as tk
from tkinter import filedialog
FPS = 30
FONT_SIZE = 18
OUTLINE_WIDTH = 2
UI_SPACING = 10
COLOR_BUTTON_SIZE = 22
pygame.init()
window = pygame.display.set_mode((800, 600), pygame.RESIZABLE)
pygame.display.set_caption("Spriteline")
pygame.display.set_icon(pygame.image.load_basic("assets/icon.bmp"))
APP_FONT = pygame.font.SysFont("Arial", FONT_SIZE)
current_color = (0x00, 0x00, 0x00)
running = True
clock = pygame.time.Clock()
def check_window_close():
global running
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
return
class ColorPalette:
BASE = (0x18, 0x18, 0x18)
TEXT = (0xE8, 0xE8, 0xE8)
FILL = (0x4F, 0x23, 0x23)
OUTLINE = (0xB0, 0x5F, 0x5F)
CANVAS = (0x88, 0x88, 0x88)
class Button:
mouse_previously_pressed = False
def __init__(self, X: int, Y: int, TEXT: str, PADDING: int, BG: bool, ON_CLICK):
self.X = X
self.Y = Y
self.TEXT = TEXT
self._TEXT_SURFACE = APP_FONT.render(self.TEXT, False, ColorPalette.TEXT).convert()
self.BG = BG
self.PADDING = PADDING
self.WIDTH = self._TEXT_SURFACE.get_width() + (self.PADDING * 2)
self.HEIGHT = self._TEXT_SURFACE.get_height() + (self.PADDING * 2)
self.ON_CLICK = ON_CLICK
def check_click(self):
MOUSE_STATE = pygame.mouse.get_pressed()
        if MOUSE_STATE[0]:  # MB1 (left button)
MOUSE_X, MOUSE_Y = pygame.mouse.get_pos()
if MOUSE_X in range(self.X, self.X + self.WIDTH) and MOUSE_Y in range(self.Y, self.Y + self.HEIGHT):
if not Button.mouse_previously_pressed:
self.ON_CLICK()
Button.mouse_previously_pressed = True
else:
Button.mouse_previously_pressed = False
def render(self):
if self.BG:
pygame.draw.rect(window, ColorPalette.FILL, (self.X, self.Y, self.WIDTH, self.HEIGHT))
pygame.draw.rect(window, ColorPalette.OUTLINE, (self.X, self.Y, self.WIDTH, self.HEIGHT), OUTLINE_WIDTH)
window.blit(self._TEXT_SURFACE, (self.X + self.PADDING, self.Y + self.PADDING))
class TopBar:
def __init__(self, X: int, Y: int, BUTTONS, PADDING: int):
self.X = X
self.Y = Y
self.BUTTONS = BUTTONS
desired_x = 0
for button_index in range(len(self.BUTTONS)):
desired_x = 0
for button in self.BUTTONS[:button_index]:
desired_x += button.WIDTH + PADDING
self.BUTTONS[button_index].X = self.X + desired_x + PADDING
self.BUTTONS[button_index].Y = self.Y + PADDING
self.WIDTH = desired_x + self.BUTTONS[len(self.BUTTONS) - 1].WIDTH + (PADDING * 2)
self.HEIGHT = self.BUTTONS[0].HEIGHT + (PADDING * 2)
def render(self):
pygame.draw.rect(window, ColorPalette.FILL, (self.X, self.Y, self.WIDTH, self.HEIGHT))
pygame.draw.rect(window, ColorPalette.OUTLINE, (self.X, self.Y, self.WIDTH, self.HEIGHT), OUTLINE_WIDTH)
for button in self.BUTTONS:
button.render()
class ColorSelectionButton:
mouse3_previously_pressed = False
def __init__(self, X: int, Y: int, R: int, G: int, B: int):
self.X = X
self.Y = Y
self.color = (R, G, B)
def render(self):
pygame.draw.rect(window, self.color, (self.X, self.Y, COLOR_BUTTON_SIZE, COLOR_BUTTON_SIZE))
pygame.draw.rect(window, (0x00, 0x00, 0x00), (self.X, self.Y, COLOR_BUTTON_SIZE, COLOR_BUTTON_SIZE), OUTLINE_WIDTH)
def on_click(self):
global current_color
current_color = self.color
def on_right_click(self):
tk_root = tk.Tk()
R_LABEL = tk.Label(tk_root, text = "R:")
R_LABEL.pack()
r_entry_box = tk.Entry(tk_root)
r_entry_box.pack()
r_entry_box.insert(0, "255")
G_LABEL = tk.Label(tk_root, text = "G:")
G_LABEL.pack()
g_entry_box = tk.Entry(tk_root)
g_entry_box.pack()
g_entry_box.insert(0, "255")
B_LABEL = tk.Label(tk_root, text = "B:")
B_LABEL.pack()
b_entry_box = tk.Entry(tk_root)
b_entry_box.pack()
b_entry_box.insert(0, "255")
def submit():
new_r_str = r_entry_box.get()
new_g_str = g_entry_box.get()
new_b_str = b_entry_box.get()
try:
NEW_COLOR = (
int(new_r_str),
int(new_g_str),
int(new_b_str),
)
for color_value in NEW_COLOR:
if color_value > 255 or color_value < 0:
raise Exception()
self.color = NEW_COLOR
except Exception:
pass
tk_root.destroy()
ok_button = tk.Button(tk_root, text = "Ok", command = submit)
ok_button.pack()
tk_root.mainloop()
def check_click(self):
MOUSE_STATE = pygame.mouse.get_pressed()
        if MOUSE_STATE[0]:  # MB1 (left button)
MOUSE_X, MOUSE_Y = pygame.mouse.get_pos()
if MOUSE_X in range(self.X, self.X + COLOR_BUTTON_SIZE) and MOUSE_Y in range(self.Y, self.Y + COLOR_BUTTON_SIZE):
if not Button.mouse_previously_pressed:
self.on_click()
Button.mouse_previously_pressed = True
else:
Button.mouse_previously_pressed = False
        if MOUSE_STATE[2]:  # MB3 (right button)
MOUSE_X, MOUSE_Y = pygame.mouse.get_pos()
if MOUSE_X in range(self.X, self.X + COLOR_BUTTON_SIZE) and MOUSE_Y in range(self.Y, self.Y + COLOR_BUTTON_SIZE):
if not ColorSelectionButton.mouse3_previously_pressed:
self.on_right_click()
ColorSelectionButton.mouse3_previously_pressed = True
else:
ColorSelectionButton.mouse3_previously_pressed = False
class ColorSelectionSection:
def __init__(self, X: int, Y: int, COLOR_BUTTONS, PADDING: int):
self.color_buttons = []
self.X = X
self.Y = Y
new_row = []
for color_button_index in range(len(COLOR_BUTTONS)):
new_row.append(COLOR_BUTTONS[color_button_index])
if len(new_row) == 4:
self.color_buttons.append(new_row)
new_row = []
        if new_row:
self.color_buttons.append(new_row)
new_row = []
for row_index in range(len(self.color_buttons)):
for button_index in range(len(self.color_buttons[row_index])):
self.color_buttons[row_index][button_index].X = self.X + button_index * COLOR_BUTTON_SIZE + PADDING
self.color_buttons[row_index][button_index].Y = self.Y + row_index * COLOR_BUTTON_SIZE + PADDING
self.WIDTH = len(self.color_buttons[0]) * COLOR_BUTTON_SIZE + (2 * PADDING)
self.HEIGHT = len(self.color_buttons) * COLOR_BUTTON_SIZE + (2 * PADDING)
def render(self):
pygame.draw.rect(window, ColorPalette.FILL, (self.X, self.Y, self.WIDTH, self.HEIGHT))
pygame.draw.rect(window, ColorPalette.OUTLINE, (self.X, self.Y, self.WIDTH, self.HEIGHT), OUTLINE_WIDTH)
for row in self.color_buttons:
for color_button in row:
color_button.render()
class SideBar:
def __init__(self, X: int, Y: int, BUTTONS, PADDING: int):
self.X = X
self.Y = Y
self.BUTTONS = BUTTONS
desired_y = 0
for button_index in range(len(self.BUTTONS)):
desired_y = 0
for button in self.BUTTONS[:button_index]:
desired_y += button.HEIGHT + PADDING
self.BUTTONS[button_index].X = self.X + PADDING
self.BUTTONS[button_index].Y = self.Y + desired_y + PADDING
self.color_selection_section = ColorSelectionSection(
self.X + PADDING,
desired_y + self.BUTTONS[len(self.BUTTONS) - 1].HEIGHT + (PADDING * 2),
[
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF), # white
ColorSelectionButton(0, 0, 0x00, 0x00, 0x00), # black
ColorSelectionButton(0, 0, 0xBC, 0x73, 0x97), # light red
ColorSelectionButton(0, 0, 0x99, 0x43, 0x43), # red
ColorSelectionButton(0, 0, 0x56, 0x2A, 0x1C), # dark red
ColorSelectionButton(0, 0, 0xB9, 0xCC, 0x7E), # light green
ColorSelectionButton(0, 0, 0x4E, 0x99, 0x43), # green
ColorSelectionButton(0, 0, 0x0D, 0x47, 0x33), # dark green
ColorSelectionButton(0, 0, 0x6D, 0xAC, 0xBA), # light blue
ColorSelectionButton(0, 0, 0x43, 0x4D, 0x99), # blue
ColorSelectionButton(0, 0, 0x18, 0x11, 0x56), # dark blue
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
ColorSelectionButton(0, 0, 0xFF, 0xFF, 0xFF),
],
2,
)
BUTTONS_WIDTH = self.BUTTONS[len(self.BUTTONS) - 1].WIDTH + (PADDING * 2)
COLORS_WIDTH = self.color_selection_section.WIDTH + (PADDING * 2)
if BUTTONS_WIDTH > COLORS_WIDTH:
self.WIDTH = BUTTONS_WIDTH
else:
self.WIDTH = COLORS_WIDTH
self.HEIGHT = desired_y + self.BUTTONS[len(self.BUTTONS) - 1].HEIGHT + self.color_selection_section.HEIGHT + (PADDING * 2)
def render(self):
pygame.draw.rect(window, ColorPalette.FILL, (self.X, self.Y, self.WIDTH, self.HEIGHT))
pygame.draw.rect(window, ColorPalette.OUTLINE, (self.X, self.Y, self.WIDTH, self.HEIGHT), OUTLINE_WIDTH)
for button in self.BUTTONS:
button.render()
self.color_selection_section.render()
class Canvas:
zoom_plus_previously_pressed = False
zoom_minus_previously_pressed = False
_previous_mouse_position = (0, 0)
def __init__(self, X: int, Y: int, SIZE_X: int, SIZE_Y: int):
self.x = X
self.y = Y
self.size_x = SIZE_X
self.size_y = SIZE_Y
self.image = pygame.Surface((self.size_x, self.size_y)).convert()
self.image.fill(ColorPalette.CANVAS)
self.zoom = 1
self.current_image_original_size = (self.size_x, self.size_y)
def render(self):
window.blit(self.image, (self.x, self.y))
def readjust_size(self, NEW_SIZE_X: int, NEW_SIZE_Y: int):
self.size_x = NEW_SIZE_X
self.size_y = NEW_SIZE_Y
self.image = pygame.transform.scale(self.image, (self.size_x, self.size_y)).convert()
def load_image(self, FILENAME: str):
self.zoom = 1
NEW_IMAGE = pygame.image.load_basic(FILENAME).convert()
self.readjust_size(NEW_IMAGE.get_width(), NEW_IMAGE.get_height())
self.current_image_original_size = (self.size_x, self.size_y)
self.image = NEW_IMAGE
def zoom_in(self):
self.zoom += 1
self.readjust_size(self.size_x * 2, self.size_y * 2)
def zoom_out(self):
if self.zoom == 1:
return
self.zoom -= 1
self.readjust_size(int(self.size_x / 2), int(self.size_y / 2))
def handle_input(self):
PRESSED_KEYS = pygame.key.get_pressed()
PRESSED_MOUSE_BUTTONS = pygame.mouse.get_pressed()
if PRESSED_KEYS[pygame.K_LCTRL]:
if PRESSED_KEYS[pygame.K_PLUS] or PRESSED_KEYS[pygame.K_KP_PLUS]:
if not Canvas.zoom_plus_previously_pressed:
self.zoom_in()
Canvas.zoom_plus_previously_pressed = True
else:
Canvas.zoom_plus_previously_pressed = False
if PRESSED_KEYS[pygame.K_MINUS] or PRESSED_KEYS[pygame.K_KP_MINUS]:
if not Canvas.zoom_minus_previously_pressed:
self.zoom_out()
Canvas.zoom_minus_previously_pressed = True
else:
Canvas.zoom_minus_previously_pressed = False
if PRESSED_MOUSE_BUTTONS[0]: # MB1
if Canvas._previous_mouse_position == (0, 0):
Canvas._previous_mouse_position = pygame.mouse.get_pos()
MOUSE_POSITION = pygame.mouse.get_pos()
DELTA_X = MOUSE_POSITION[0] - Canvas._previous_mouse_position[0]
DELTA_Y = MOUSE_POSITION[1] - Canvas._previous_mouse_position[1]
Canvas._previous_mouse_position = MOUSE_POSITION
self.x += DELTA_X
self.y += DELTA_Y
else:
Canvas._previous_mouse_position = (0, 0)
else:
if PRESSED_MOUSE_BUTTONS[0]: # MB1
MOUSE_POSITION = pygame.mouse.get_pos()
RELATIVE_MOUSE_POSITION = (MOUSE_POSITION[0] - self.x, MOUSE_POSITION[1] - self.y)
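                # snap to the zoomed pixel grid: each brush stroke fills one image pixel scaled by the current zoom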
TILED_MOUSE_POSITION = (
int(RELATIVE_MOUSE_POSITION[0] * (0.5 ** (self.zoom - 1))) * (2 ** (self.zoom - 1)),
int(RELATIVE_MOUSE_POSITION[1] * (0.5 ** (self.zoom - 1))) * (2 ** (self.zoom - 1)))
BRUSH_SCALE = 2 ** (self.zoom - 1)
if RELATIVE_MOUSE_POSITION[0] >= 0 and RELATIVE_MOUSE_POSITION[1] >= 0:
if RELATIVE_MOUSE_POSITION[0] < self.size_x and RELATIVE_MOUSE_POSITION[1] < self.size_y:
self.image.fill(current_color, (TILED_MOUSE_POSITION[0], TILED_MOUSE_POSITION[1], BRUSH_SCALE, BRUSH_SCALE))
def get_image_to_save(self) -> pygame.Surface:
image_to_save = pygame.transform.scale(self.image, self.current_image_original_size)
return image_to_save
canvas = Canvas(64, 64, 32, 32)
def resize_canvas():
tk_root = tk.Tk()
WIDTH_LABEL = tk.Label(tk_root, text = "Width:")
WIDTH_LABEL.pack()
width_entry_box = tk.Entry(tk_root)
width_entry_box.pack()
width_entry_box.insert(0, "32")
HEIGHT_LABEL = tk.Label(tk_root, text = "Height:")
HEIGHT_LABEL.pack()
height_entry_box = tk.Entry(tk_root)
height_entry_box.pack()
height_entry_box.insert(0, "32")
def submit():
new_width_str = width_entry_box.get().replace(' ', "")
new_height_str = height_entry_box.get().replace(' ', "")
try:
NEW_WIDTH: int = int(new_width_str)
NEW_HEIGHT: int = int(new_height_str)
if NEW_WIDTH <= 0 or NEW_HEIGHT <= 0:
raise Exception()
canvas.readjust_size(NEW_WIDTH, NEW_HEIGHT)
canvas.zoom = 1
canvas.current_image_original_size = (NEW_WIDTH, NEW_HEIGHT)
except Exception:
pass
tk_root.destroy()
ok_button = tk.Button(tk_root, text = "Ok", command = submit)
ok_button.pack()
tk_root.mainloop()
def zoom_canvas_plus():
    canvas.zoom_in()
    Canvas.zoom_plus_previously_pressed = True
def zoom_canvas_minus():
canvas.zoom_out()
Canvas.zoom_minus_previously_pressed = True
SIDE_BAR = SideBar(
0,
0,
[
Button(0, 0, "resize", 2, False, resize_canvas ),
Button(0, 0, "zoom+", 2, False, zoom_canvas_plus ),
Button(0, 0, "zoom-", 2, False, zoom_canvas_minus),
],
2,
)
def save_current():
global canvas
tk_root = tk.Tk()
tk_root.withdraw()
FILENAME = filedialog.asksaveasfilename(title = "Choose where to save your sprite!")
pygame.image.save(canvas.get_image_to_save(), FILENAME)
tk_root.destroy()
def open_file():
global canvas
tk_root = tk.Tk()
tk_root.withdraw()
FILENAME = filedialog.askopenfilename(title = "Open a sprite to edit!")
try:
canvas.load_image(FILENAME)
except Exception:
pass
tk_root.destroy()
TOP_BAR = TopBar(
SIDE_BAR.WIDTH + UI_SPACING,
0,
[
Button(0, 0, "save", 2, False, save_current),
Button(0, 0, "load", 2, False, open_file),
],
2,
)
canvas.x, canvas.y = SIDE_BAR.WIDTH + UI_SPACING, TOP_BAR.HEIGHT + UI_SPACING
while running:
clock.tick(FPS)
check_window_close()
for button in TOP_BAR.BUTTONS:
button.check_click()
for button in SIDE_BAR.BUTTONS:
button.check_click()
for row in SIDE_BAR.color_selection_section.color_buttons:
for button in row:
button.check_click()
canvas.handle_input()
window.fill(ColorPalette.BASE)
canvas.render()
SIDE_BAR.render()
TOP_BAR.render()
pygame.display.flip()
pygame.display.quit()
pygame.quit()
|
# Gradient descent for the Rosenbrock function: f(x, y) = (a - x)**2 + b*(y - x**2)**2
def process(numIter, alpha, x, y, a, b):
history_arr = []
history_arr.append((x, y, get_f(x, y, a, b)))
    for i in range(numIter):
        # evaluate both partial derivatives at the current point before updating either coordinate
        dx = get_delta_x(x, y, a, b)
        dy = get_delta_y(x, y, a, b)
        x -= alpha * dx
        y -= alpha * dy
        history_arr.append((x, y, get_f(x, y, a, b)))
return history_arr
def get_delta_x(x, y, a, b):
return -2 * (a - x) - 2 * b * (y - x ** 2) * 2 * x
def get_delta_y(x, y, a, b):
return 2 * b * (y - x ** 2)
def get_f(x, y, a, b):
return (a - x) ** 2 + b * ((y - x ** 2) ** 2)
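# Minimal usage sketch (not part of the original file): the standard Rosenbrock
# parameters are a=1, b=100. With a small enough learning rate the objective value
# along the returned history decreases toward the minimum at (1, 1); plain gradient
# descent crawls along the curved valley, so expect slow progress.
if __name__ == "__main__":
    history = process(numIter=20000, alpha=0.001, x=0.0, y=0.0, a=1, b=100)
    print("start:", history[0])
    print("end:  ", history[-1])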
|
import cv2
import numpy as np
import time
video = cv2.VideoCapture(0, cv2.CAP_DSHOW)
time.sleep(3)
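# let the camera warm up, then keep the last of ~60 frames as the static background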
for i in range(60):
check, background = video.read()
background = np.flip(background, axis=1)
while(video.isOpened()):
check, img = video.read()
    if not check:
break
img = np.flip(img, axis=1)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red = np.array([0, 120, 50])
upper_red = np.array([10, 255, 255])
mask1 = cv2.inRange(hsv, lower_red, upper_red)
lower_red = np.array([170, 120, 70])
upper_red = np.array([180, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red)
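    # red hue wraps around the ends of the HSV range, so merge both red masks and clean them up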
mask1 = mask1 + mask2
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8))
mask2 = cv2.bitwise_not(mask1)
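    # res1 keeps everything except the red cloth; res2 pastes the background where the cloth is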
res1 = cv2.bitwise_and(img, img, mask=mask2)
res2 = cv2.bitwise_and(background, background, mask=mask1)
final = cv2.addWeighted(res1, 1, res2, 1, 0)
cv2.imshow("final", final)
key = cv2.waitKey(1)
if key == ord('q'):
break
video.release()
cv2.destroyAllWindows()
|
import os
import pynbody
import numpy as np
import tangos
import tangos.input_handlers.pynbody
from tangos import testing, input_handlers, tools, log, parallel_tasks
import numpy.testing as npt
def _get_gadget_snap_path(snapname):
return os.path.join(os.path.dirname(__file__),"test_simulations",
"test_gadget_rockstar",snapname)
def _create_dummy_simsnap():
f = pynbody.new(dm=2097152)
f['iord'] = np.arange(2097152)
f['pos'] = np.zeros((2097152,3)).view(pynbody.array.SimArray)
f['pos'].units="Mpc"
f['vel'] = np.zeros((2097152, 3)).view(pynbody.array.SimArray)
f['vel'].units = "km s^-1"
f['mass'] = np.zeros(2097152).view(pynbody.array.SimArray)
f['mass'].units="Msol"
f.properties['boxsize'] = pynbody.units.Unit("50 Mpc")
return f
def _ensure_dummy_gadgetsnaps_exist():
f = None
if not os.path.exists(_get_gadget_snap_path("snapshot_013")):
f = _create_dummy_simsnap()
f.properties['z'] = 2.9322353443127693
f.write(pynbody.snapshot.gadget.GadgetSnap, _get_gadget_snap_path("snapshot_013"))
if not os.path.exists(_get_gadget_snap_path("snapshot_014")):
if not f:
f = _create_dummy_simsnap()
f.properties['z'] = 2.2336350508252085
f.write(pynbody.snapshot.gadget.GadgetSnap, _get_gadget_snap_path("snapshot_014"))
def setup():
_ensure_dummy_gadgetsnaps_exist()
testing.init_blank_db_for_testing()
tangos.config.base = os.path.join(os.path.dirname(__file__), "test_simulations")
manager = tools.add_simulation.SimulationAdderUpdater(input_handlers.pynbody.GadgetRockstarInputHandler("test_gadget_rockstar"))
with log.LogCapturer():
manager.scan_simulation_and_add_all_descendants()
def teardown():
tangos.core.close_db()
def test_property_import():
importer = tools.property_importer.PropertyImporter()
importer.parse_command_line("X Y Z Mvir --for test_gadget_rockstar".split())
with log.LogCapturer():
parallel_tasks.use('multiprocessing')
parallel_tasks.launch(importer.run_calculation_loop,2)
Mvir_test, = tangos.get_timestep("test_gadget_rockstar/snapshot_013").calculate_all("Mvir")
npt.assert_allclose(Mvir_test, [1.160400e+13, 8.341900e+12, 5.061400e+12, 5.951900e+12])
def test_consistent_tree_import():
importer = tools.consistent_trees_importer.ConsistentTreesImporter()
importer.parse_command_line("--for test_gadget_rockstar --with-ids".split())
with log.LogCapturer():
importer.run_calculation_loop()
assert (tangos.get_timestep("test_gadget_rockstar/snapshot_014").calculate_all("consistent_trees_id", object_typetag='halo')[0]==[17081, 19718, 19129]).all()
testing.assert_halolists_equal(tangos.get_timestep("test_gadget_rockstar/snapshot_014").calculate_all("earlier(1)", object_typetag='halo')[0],
["test_gadget_rockstar/snapshot_013/halo_2",
"test_gadget_rockstar/snapshot_013/halo_4"])
assert tangos.get_halo("%/%13/halo_1").next == tangos.get_halo("%/%14/phantom_1")
|
import torch, numpy as np, warnings, pandas as pd, collections, torch.nn.functional as F
from torch.utils.data import DataLoader
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from ..util import (_LitValidated, empty_cache_on_exit, create_matrix,
create_second_order_dataframe, default_random_split, auto_cast_lazy_score)
from .bpr import _BPR_Common
try:
import dgl, dgl.function as fn
except ImportError:
warnings.warn("GraphConv requires dgl package")
def _plain_average(G, item_embeddings):
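    # average the embeddings of a user's history items (source nodes) into the user (destination) embedding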
with G.local_scope():
G.srcdata['h'] = item_embeddings
G.update_all(fn.copy_src('h', 'm'), fn.mean(msg='m', out='h'))
return G.dstdata['h']
class _GraphConv(_BPR_Common):
""" module to compute user RFM embedding.
"""
def __init__(self, n_users=None, n_items=None,
user_rec=True, item_rec=True, no_components=32,
n_negatives=10, lr=1, weight_decay=1e-5,
user_conv_model='GCN', # plain_average
user_embeddings=None, item_embeddings=None, item_zero_bias=False,
recency_boundary_multipliers=[0.1, 0.3, 1, 3, 10], horizon=float("inf")):
super().__init__(user_rec, item_rec, n_negatives, lr, weight_decay)
if item_embeddings is not None:
warnings.warn("setting no_components according to provided embeddings")
no_components = item_embeddings.shape[-1]
self.item_encoder = torch.nn.Embedding(n_items, no_components)
if item_embeddings is not None:
self.item_encoder.weight.requires_grad = False
self.item_encoder.weight.copy_(torch.as_tensor(item_embeddings))
self.item_bias_vec = torch.nn.Embedding(n_items, 1)
if item_zero_bias:
self.item_bias_vec.weight.requires_grad = False
self.item_bias_vec.weight.copy_(torch.zeros_like(self.item_bias_vec.weight))
if user_conv_model == 'GCN':
self.user_conv = dgl.nn.pytorch.conv.GraphConv(no_components, no_components, "none")
elif user_conv_model == 'plain_average':
self.user_conv = _plain_average
self.user_layer_norm = torch.nn.LayerNorm(no_components)
if user_embeddings is not None:
self.user_ext_layer_norm = torch.nn.LayerNorm(user_embeddings.shape[1])
self.register_buffer("recency_boundaries",
torch.as_tensor(recency_boundary_multipliers) * horizon)
self.recency_encoder = torch.nn.Embedding(len(recency_boundary_multipliers) + 1, 1)
self.init_weights()
def init_weights(self):
initrange = 0.1
if self.item_encoder.weight.requires_grad:
torch.nn.init.uniform_(self.item_encoder.weight, -initrange, initrange)
if self.item_bias_vec.weight.requires_grad:
torch.nn.init.zeros_(self.item_bias_vec.weight)
if hasattr(self.user_conv, "weight"):
torch.nn.init.uniform_(self.user_conv.weight, -initrange, initrange)
if hasattr(self.user_conv, "bias"):
torch.nn.init.zeros_(self.user_conv.bias)
def _user_subgraph(self, i, G):
I, i_reverse = torch.unique(i, return_inverse=True)
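        # if the batch touches fewer than half of all users, extract a 1-hop block around just those users; otherwise use the full graph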
if len(I) < 0.5 * G.num_nodes('user'):
sampler = dgl.dataloading.neighbor.MultiLayerFullNeighborSampler(1)
mfg = sampler.sample_blocks(G.to(I.device), {'user': I})[0]
sub_G = dgl.edge_type_subgraph(dgl.block_to_graph(mfg), mfg.etypes)
return i_reverse, sub_G, sub_G.srcdata['_ID']
else:
return i, G, torch.arange(G.num_nodes('item')).to(i)
def user_encoder(self, i, G):
i, G, src_j = self._user_subgraph(i, G)
item_embeddings = self.item_encoder(src_j)
user_embeddings = self.user_layer_norm(self.user_conv(G, item_embeddings))
if 'embedding' in G.dstdata:
user_ext = self.user_ext_layer_norm(G.dstdata['embedding'])
user0, user1 = torch.split(user_embeddings, [
user_embeddings.shape[1] - user_ext.shape[1], user_ext.shape[1]], 1)
user_embeddings = torch.cat([user0, user1 + user_ext], 1)
return user_embeddings[i]
def user_bias_vec(self, i, G):
with G.local_scope():
            # copy each edge's timestamp into its message so the reducer can take the per-user max
            G.update_all(fn.copy_e('t', 't'), fn.max('t', 'last_t'))
user_recency = G.nodes['user'].data['test_t'] - G.nodes['user'].data['last_t']
recency_buckets = torch.bucketize(user_recency, self.recency_boundaries)
return self.recency_encoder(recency_buckets)[i]
def on_fit_start(self):
if hasattr(self, "prior_score") and not hasattr(self, "prior_score_T"):
self.prior_score_T = [getattr(p, "T", None) for p in self.prior_score]
self.G_list = [G.to(self.device) for G in self.G_list]
def training_step(self, batch, batch_idx):
k = batch[:, -1]
loss_list = []
for s, (G, u_p, i_p, p, pT) in enumerate(zip(
self.G_list, self.user_proposal, self.item_proposal,
self.prior_score, self.prior_score_T
)):
single_loss = self._bpr_training_step(batch[k == s, :-1], u_p, i_p, p, pT,
user_kw={"G": G})
loss_list.append(single_loss)
loss = torch.stack(loss_list).mean()
self.log("train_loss", loss, prog_bar=True)
return loss
class GraphConv:
def __init__(self, D, batch_size=10000, max_epochs=50,
sample_with_prior=True, sample_with_posterior=0.5, **kw):
self._padded_item_list = [None] + D.training_data.item_df.index.tolist()
self.batch_size = batch_size
self.max_epochs = max_epochs
self.sample_with_prior = sample_with_prior
self.sample_with_posterior = sample_with_posterior
self._model_kw = {'horizon': D.horizon}
if "embedding" in D.training_data.item_df:
item_embeddings = np.vstack(D.training_data.item_df["embedding"]).astype('float32')
item_embeddings = np.pad(item_embeddings, ((1, 0), (0, 0)), constant_values=0)
self._model_kw["item_embeddings"] = item_embeddings
self._model_kw.update(kw)
def _extract_features(self, D):
""" create item -> user graph; allow same USER_ID with different TEST_START_TIME """
user_non_empty = D.user_in_test.reset_index()[D.user_in_test['_hist_len'].values > 0]
past_event_df = user_non_empty['_hist_items'].explode().to_frame("ITEM_ID")
past_event_df["TIMESTAMP"] = user_non_empty['_hist_ts'].explode().values
past_event_df = past_event_df.join( # item embeddings are shared for different times
pd.Series({k: j for j, k in enumerate(self._padded_item_list)}).to_frame("j"),
on="ITEM_ID", how='inner') # drop oov items
G = dgl.heterograph(
{('user', 'source', 'item'): (past_event_df.index.values,
past_event_df["j"].values)},
{'user': len(D.user_in_test), 'item': len(self._padded_item_list)}
)
G.edata['t'] = torch.as_tensor(past_event_df["TIMESTAMP"].values.astype('float64'))
# add padding item to guard against users with empty histories
n_users = G.num_nodes('user')
pad_time = -np.inf * torch.ones(n_users).double()
G = dgl.add_edges(G, range(n_users), [0] * n_users, {'t': pad_time})
user_test_time = D.user_in_test['TEST_START_TIME'].values
G.nodes['user'].data['test_t'] = torch.as_tensor(user_test_time)
if hasattr(D.user_in_test, 'embedding'):
user_ext_embeddings = np.vstack(D.user_in_test['embedding']).astype('float32')
G.nodes['user'].data['embedding'] = torch.as_tensor(user_ext_embeddings)
return G.reverse(copy_edata=True)
def _extract_task(self, k, V):
user_proposal = np.ravel(V.target_csr.sum(axis=1) + 0.1) ** self.sample_with_posterior
item_proposal = np.ravel(V.target_csr.sum(axis=0) + 0.1) ** self.sample_with_posterior
item_proposal = pd.Series(item_proposal, V.item_in_test.index) \
.reindex(self._padded_item_list, fill_value=0).values
V = V.reindex(self._padded_item_list, axis=1)
target_coo = V.target_csr.tocoo()
dataset = np.transpose([
target_coo.row, target_coo.col, k * np.ones_like(target_coo.row),
]).astype(int)
G = self._extract_features(V)
return dataset, G, user_proposal, item_proposal, getattr(V, "prior_score", None)
@empty_cache_on_exit
def fit(self, *V_arr):
dataset, G_list, user_proposal, item_proposal, prior_score = zip(*[
self._extract_task(k, V) for k, V in enumerate(V_arr)
])
print("GraphConv label sizes", [len(d) for d in dataset])
dataset = np.vstack(dataset)
if "embedding" in V_arr[0].user_in_test:
self._model_kw["user_embeddings"] = np.vstack(
V_arr[0].user_in_test['embedding'].iloc[:1]) # just need shape[1]
model = _GraphConv(None, len(self._padded_item_list), **self._model_kw)
N = len(dataset)
train_set, valid_set = default_random_split(dataset)
trainer = Trainer(
max_epochs=self.max_epochs, gpus=int(torch.cuda.is_available()),
log_every_n_steps=1, callbacks=[model._checkpoint, LearningRateMonitor()])
model.G_list = G_list
model.user_proposal = user_proposal
model.item_proposal = item_proposal
if self.sample_with_prior:
model.prior_score = [auto_cast_lazy_score(p) for p in prior_score]
else:
model.prior_score = [None for p in prior_score]
trainer.fit(
model,
DataLoader(train_set, self.batch_size, shuffle=True, num_workers=(N > 1e4) * 4),
DataLoader(valid_set, self.batch_size, num_workers=(N > 1e4) * 4))
model._load_best_checkpoint("best")
for attr in ['G_list', 'user_proposal', 'item_proposal', 'prior_score', 'prior_score_T']:
delattr(model, attr)
src_j = torch.arange(len(self._padded_item_list))
self.item_embeddings = model.item_encoder(src_j).detach().numpy()
self.item_biases = model.item_bias_vec(src_j).detach().numpy().ravel()
self.model = model
return self
def transform(self, D):
G = self._extract_features(D)
i = torch.arange(G.num_nodes('user'))
user_embeddings = self.model.user_encoder(i, G).detach().numpy()
user_biases = self.model.user_bias_vec(i, G).detach().numpy().ravel()
S = create_second_order_dataframe(
user_embeddings, self.item_embeddings, user_biases, self.item_biases,
D.user_in_test.index, self._padded_item_list, 'softplus')
return S.reindex(D.item_in_test.index, axis=1)
|
import re
s = 'life is short, i use python'
res = re.search('life(.*)python', s)
print(res) # <re.Match object; span=(0, 27), match='life is short, i use python'>
print(res.group()) # life is short, i use python
print(res.group(0)) # life is short, i use python --> group(0) is the full match
print(res.group(1)) # is short, i use
print(res.groups()) # (' is short, i use ',)
|
from ecoxipy.pyxom.output import PyXOMOutput
from ecoxipy.decorators import markup_builder_namespace
from ecoxipy.html import HTML5_ELEMENT_NAMES
from tests.performance.ecoxipy_base import create_testdoc
create_testdoc = markup_builder_namespace(
PyXOMOutput, '_b', *HTML5_ELEMENT_NAMES)(create_testdoc)
create_testdoc_string = lambda *args: bytes(create_testdoc(*args))
|
from .models import SocialLink, MainMenuPoint
def load_settings(request):
"""The processor for implements social_links and menu options in the context of every page on the site"""
return {
'social_links': SocialLink.objects.all(),
'main_menu': MainMenuPoint.objects.all(),
}
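# Registration sketch (hypothetical dotted path; adjust to wherever this module lives).
# Django only calls this processor if it is listed in the TEMPLATES setting, e.g.:
#
#   TEMPLATES = [{
#       ...
#       "OPTIONS": {
#           "context_processors": [
#               "django.template.context_processors.request",
#               "mainapp.context_processors.load_settings",  # hypothetical path to this module
#           ],
#       },
#   }]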
|
import math
import os
import shutil
import socket
import stat
import subprocess
import textwrap
import time
from aioredis.log import logger
REDIS_SERVER_EXEC = os.environ.get('REDIS_SERVER_EXEC') or 'redis-server'
REDIS_SLOT_COUNT = 16384
_MAX_RETRY_ERRORS = 4
_ATTEMPT_INTERVAL = 0.3
class TestCluster:
"""This class allows to create a local Redis cluster for test purposes.
It also includes methods to stop and restart nodes to test failover
behaviour.
Parameters:
- *ports*: The cluster will use the ports from ports list
- *directory* is used to store the configuration files of all
processes.
    - *node_timeout*: The cluster node timeout in milliseconds,
see http://redis.io/topics/cluster-tutorial.
"""
def __init__(self, ports, directory, node_timeout=3000,
server_exec=REDIS_SERVER_EXEC, assign_slots=True):
self.redis_count = len(ports)
self.ports = ports
self.directory = os.path.abspath(directory)
self.node_timeout = node_timeout
self.processes = {}
self._new_directories = set()
self._exec = server_exec
self._assign_slots_in_setup = assign_slots
def setup(self):
self._setup_directory()
self._create_redis_directories()
for port in self.get_ports():
self._start_redis(port)
self.configure_cluster()
def terminate(self):
for process in self.processes.values():
process.terminate()
for process in self.processes.values():
process.wait(1)
def clear_directories(self):
for directory in self._new_directories:
try:
self._delete_directory_contents(directory)
os.rmdir(directory)
except PermissionError as error:
print(error)
def stop_redis(self, port):
if port not in self.processes:
raise ValueError('No Redis running at port {}.'.format(port))
process = self.processes.pop(port)
process.terminate()
process.wait(1)
def restart_redis(self, port):
if port in self.processes:
raise ValueError('Redis process at port {} is still running.'
.format(port))
self._start_redis(port)
def get_ports(self):
return self.ports
def _setup_directory(self):
if not os.path.exists(self.directory):
os.makedirs(self.directory)
def _create_redis_directories(self):
for port in self.get_ports():
redis_directory = self._get_redis_directory(port)
if not os.path.exists(redis_directory):
os.mkdir(redis_directory)
else:
self._delete_directory_contents(redis_directory)
self._write_redis_config_file(os.path.join(
redis_directory, 'redis.conf'), port)
self._new_directories.add(redis_directory)
def _start_redis(self, port):
directory = self._get_redis_directory(port)
self.processes[port] = subprocess.Popen(
[self._exec, 'redis.conf'], cwd=directory)
def configure_cluster(self):
time.sleep(_ATTEMPT_INTERVAL) # Give cluster some time to start up
addresses = [('127.0.0.1', port) for port in self.get_ports()]
sockets = self._connect_sockets(addresses)
masters_count = math.ceil(self.redis_count / 2)
masters = sockets[:masters_count]
master_addresses = addresses[:masters_count]
slaves = sockets[masters_count:]
master_node_ids = [self._determine_node_id(master, address)
for master, address in zip(masters, addresses)]
if self._assign_slots_in_setup:
self._assign_slots(masters, master_addresses)
self._send_meet_messages_to_all(sockets, addresses)
# MEET messages need some time to propagate
time.sleep(_ATTEMPT_INTERVAL)
self._send_replicate_messages(slaves, master_node_ids)
if self._assign_slots_in_setup:
# cluster never becomes 'ok' if slots are unbound
self._wait_until_cluster_state_ok(sockets)
for sock in sockets:
sock.close()
def _connect_sockets(self, addresses):
return self._retry(socket.create_connection, addresses,
error_message='Could not connect to Redis.')
def _determine_node_id(self, socket, address):
socket.sendall(b'CLUSTER NODES\r\n')
data = self._read_bulk_string_response(socket)
node_id = data[:40].decode('utf-8')
logger.debug("Master at {} has node id {}.".format(address, node_id))
return node_id
def _assign_slots(self, masters, addresses):
slot_boundaries = [math.floor(i * REDIS_SLOT_COUNT / len(masters))
for i in range(len(masters) + 1)]
slot_ranges = [range(b1, b2)
for b1, b2 in zip(slot_boundaries, slot_boundaries[1:])]
for master, slot_range, address in zip(
masters, slot_ranges, addresses):
logger.debug(
"Assigning master at {} slots {}-{}"
.format(address, slot_range.start, slot_range.stop - 1)
)
slots = ' '.join(str(slot) for slot in slot_range)
try:
self._send_command_and_expect_ok(
master, 'CLUSTER ADDSLOTS {}\r\n'.format(slots))
except IOError as e:
raise IOError(
"ADDSLOTS failed. Maybe a cluster is already running? "
"({}).".format(str(e))
)
def _send_meet_messages_to_all(self, sockets, addresses):
for i, sock in enumerate(sockets):
for j, address in enumerate(addresses):
if i != j:
self._send_command_and_expect_ok(
sock, 'CLUSTER MEET {} {}\r\n'.format(*address))
def _send_replicate_messages(self, slaves, master_node_ids):
def _send_replicate_message(arg):
slave, master_node_id = arg
self._send_command_and_expect_ok(
slave, 'CLUSTER REPLICATE {}\r\n'.format(master_node_id))
self._retry(
_send_replicate_message,
list(zip(slaves, master_node_ids)),
'Replication failed.'
)
def _wait_until_cluster_state_ok(self, sockets):
def _check_state(socket):
socket.sendall(b'CLUSTER INFO\r\n')
data = self._read_bulk_string_response(socket).decode('utf-8')
if 'cluster_state:ok' not in data:
raise IOError('Cluster state not ok')
self._retry(
_check_state,
sockets,
error_message='Cluster state not ok.',
max_errors=10
)
def _retry(self, method, arguments, error_message,
max_errors=_MAX_RETRY_ERRORS, interval=_ATTEMPT_INTERVAL):
results = [None] * len(arguments)
successful_indexes = []
errors = 0
while len(successful_indexes) < len(arguments):
for i, argument in enumerate(arguments):
if i not in successful_indexes:
try:
results[i] = method(argument)
successful_indexes.append(i)
except (IOError, ConnectionRefusedError):
errors += 1
if errors >= max_errors:
raise IOError(
error_message +
' Stop retrying after {} errors.'
.format(errors)
)
else:
logger.info(
error_message + ' Will retry after {}s.'
.format(interval)
)
time.sleep(interval)
return results
def _recv_until(self, socket, delimiter):
data = b''
while delimiter not in data:
data += socket.recv(1024)
index = data.index(delimiter)
return data[:index], data[index + len(delimiter):]
def _recv_bytes(self, socket, byte_count):
data = b''
while len(data) < byte_count:
received = socket.recv(min(1024, byte_count - len(data)))
if len(received) == 0:
raise IOError('Socket closed')
else:
data += received
return data
def _send_command_and_expect_ok(self, socket, command):
socket.sendall(command.encode('utf-8'))
response, _ = self._recv_until(socket, b'\r\n')
if response != b'+OK':
raise IOError(response.decode('utf-8'))
def _read_bulk_string_response(self, socket):
header, data = self._recv_until(socket, b'\r\n')
if header[0] != ord('$'):
raise ValueError('Expected bulk string response.')
byte_count = int(header[1:].decode('utf-8'))
missing_byte_count = byte_count - len(data) + 2
if missing_byte_count > 0:
remaining_data = self._recv_bytes(socket, missing_byte_count)
if remaining_data[-2:] != b'\r\n':
raise ValueError('Invalid bulk string received.')
data += remaining_data[:-2]
return data
def _get_redis_directory(self, port):
return os.path.join(self.directory, 'redis-{}'.format(port))
@staticmethod
def _delete_directory_contents(directory):
for name in os.listdir(directory):
path = os.path.join(directory, name)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def _write_redis_config_file(self, path, port):
with open(path, 'w') as file:
file.write(textwrap.dedent("""
port {}
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout {}
""".format(port, self.node_timeout)
))
# Protect against the CONFIG REWRITE test
self._remove_write_access(path)
@staticmethod
def _remove_write_access(path):
os.chmod(path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
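# Minimal usage sketch (not part of the original module): bring up a three-node
# cluster for a test and tear it down afterwards. The ports and directory are example
# values, and a local redis-server binary is assumed to be on the PATH.
def _example_usage():
    cluster = TestCluster(ports=[7000, 7001, 7002], directory='/tmp/test-cluster')
    cluster.setup()
    try:
        pass  # run cluster tests here
    finally:
        cluster.terminate()
        cluster.clear_directories()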
|
from pyhf.parameters.paramsets import (
paramset,
unconstrained,
constrained_by_normal,
constrained_by_poisson,
)
from pyhf.parameters.utils import reduce_paramsets_requirements
from pyhf.parameters.paramview import ParamViewer
__all__ = [
'paramset',
'unconstrained',
'constrained_by_normal',
'constrained_by_poisson',
'reduce_paramsets_requirements',
'ParamViewer',
]
def __dir__():
return __all__
|
import time
import datetime
print("Please type your text after 3 seconds")
print("3")
time.sleep(1)
print("2")
time.sleep(1)
print("Go!")
time.sleep(0.2)
before = datetime.datetime.now()
text=input("Type here:")
after = datetime.datetime.now()
speed = after - before
seconds = round(speed.total_seconds(),2)
letter_per_second = round(len(text) / seconds,1)
print("You typed in : {} seconds.".format(seconds))
print("{} letters per second.".format(letter_per_second))
|
import pygments, markdown, os
from flask import Flask, flash, redirect, render_template, render_template_string, request, url_for, send_from_directory
from flask import current_app as app
from flask_flatpages import FlatPages, pygmented_markdown, pygments_style_defs
from flask_mail import Mail, Message
from .forms.paginate import Paginate as Paginate
from .forms.forms import ContactForm as ContactForm
email_addr = os.environ.get('EMAIL_ACC', '')
pages = FlatPages(app)
mail = Mail(app)
# 404
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html"), 404
@app.route('/pygments.css')
def pygments_css():
return pygments_style_defs("monokai"), 200, {"Content-Type":"text/css"}
@app.route("/")
def index(num = 0):
posts = [p for p in pages if "date" in p.meta]
sorted_pages=sorted(posts, reverse=True, key=lambda page: page.meta["date"])
ppaginate=Paginate(app.config["PAGES_NUMBER_PER_PAGE"],sorted_pages)
if (num >= ppaginate.get_total_number()):
return redirect(url_for("index_extend"))
return render_template("index.html",num=num,pages=ppaginate.get_number_pages(num),config=app.config,current_number=num,total_num=ppaginate.get_total_number()- 1)
@app.route("/index/<string:num>.html")
def index_extend(num):
num=int(num)
posts = [p for p in pages if "date" in p.meta]
sorted_pages=sorted(posts, reverse=True, key=lambda page: page.meta["date"])
ppaginate=Paginate(app.config["PAGES_NUMBER_PER_PAGE"],sorted_pages)
if (num >= ppaginate.get_total_number()):
num = 0
return render_template("index.html",
num=num,
pages=ppaginate.get_number_pages(num),
config=app.config,
current_number=num,
total_num=ppaginate.get_total_number() - 1)
@app.route("/<path:path>/")
def staticpage(path):
print('path', path)
p = pages.get_or_404(path)
staticpage = p if "static" in p.meta else None
    if staticpage is None:
        return page_not_found(404)
return render_template("page.html", page=staticpage)
@app.route('/status')
def status():
    return "OK", 200  # Flask views must return a response body, not a bare status code
@app.route('/img/<path:filename>/')
def serve_static(filename):
print('filename', filename)
root_dir = os.path.dirname(os.path.realpath(__file__))
print(root_dir)
return send_from_directory(os.path.join(root_dir, 'pages', 'img'), filename)
@app.route("/articles/<path:path>/")
def page(path):
p = pages.get_or_404(path)
page = p if "date" in p.meta else None
    if page is None:
return page_not_found(404)
return render_template("post.html", page=page)
@app.route("/tag/<string:tag>/")
def tag(tag):
tagged = [p for p in pages if tag in p.meta.get("tags", [])]
return render_template("tags.html", pages=tagged, tag=tag)
@app.route("/contact", methods=("GET", "POST"))
def contact():
form = ContactForm()
error = None
if request.method == "POST":
        if not form.validate():
error = "Please fill in all fields"
else:
msg = Message(
"Message from " + form.name.data + "," + form.email.data,
sender=email_addr,
recipients=[email_addr])
msg.body = """
From: %s <%s>,
%s
""" % (form.name.data, form.email.data, form.message.data)
mail.send(msg)
flash("Message sent.")
return redirect( url_for("contact") )
return render_template("contact.html", form=form, error=error)
|
import json
import kenlm
from tqdm import tqdm
model = kenlm.Model("../es.arpa.bin")
def get_perplexity(doc):
doc_log_score, doc_length = 0, 0
for line in doc.split("\n"):
log_score = model.score(line)
length = len(line.split()) + 1
doc_log_score += log_score
doc_length += length
return 10.0 ** (-doc_log_score / doc_length)
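# kenlm's Model.score returns the total log10 probability of a sentence (including the
# end-of-sentence token, which the "+ 1" on the length accounts for), so the value above
# is the usual word-level perplexity 10 ** (-log10 P / N).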
with open("mc4-es-train-50M-stats.csv", "w") as csv:
with open("mc4-es-train-50M-steps.jsonl", "r") as data:
for line in tqdm(data):
text = json.loads(line)["text"]
csv.write(f"{len(text.split())},{get_perplexity(text)}\n")
|