max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
examples/beam_simple_rhino.py | franaudo/fea | 0 | 12766251 | from math import pi
from compas_fea.cad import rhino
from compas_fea.structure import CircularSection
from compas_fea.structure import ElasticIsotropic
from compas_fea.structure import ElementProperties as Properties
from compas_fea.structure import GeneralDisplacement
from compas_fea.structure import GeneralStep
from compas_fea.structure import PinnedDisplacement
from compas_fea.structure import PointLoad
from compas_fea.structure import Structure
# Author(s): <NAME> (github.com/andrewliew)
# Structure
mdl = Structure(name='beam_simple', path='C:/Temp/')
# Elements
network = rhino.network_from_lines(layer='elset_lines')
mdl.add_nodes_elements_from_network(network=network, element_type='BeamElement',
elset='elset_lines', axes={'ex': [0, -1, 0]})
# Sets
rhino.add_sets_from_layers(mdl, layers=['nset_left', 'nset_right', 'nset_weights'])
# Materials
mdl.add(ElasticIsotropic(name='mat_elastic', E=20*10**9, v=0.3, p=1500))
# Sections
_, ekeys, L, Lt = rhino.ordered_network(mdl, network=network, layer='nset_left')
for i, Li in zip(ekeys, L):
ri = (1 + Li / Lt) * 0.020
sname = 'sec_{0}'.format(i)
mdl.add(CircularSection(name=sname, r=ri))
mdl.add(Properties(name='ep_{0}'.format(i), material='mat_elastic', section=sname, elements=[i]))
# Displacements
mdl.add([
PinnedDisplacement(name='disp_left', nodes='nset_left'),
GeneralDisplacement(name='disp_right', nodes='nset_right', y=0, z=0, xx=0),
GeneralDisplacement(name='disp_rotate', nodes='nset_left', yy=30*pi/180),
])
# Loads
mdl.add(PointLoad(name='load_weights', nodes='nset_weights', z=-100))
# Steps
mdl.add([
GeneralStep(name='step_bc', displacements=['disp_left', 'disp_right']),
GeneralStep(name='step_load', loads='load_weights', displacements='disp_rotate'),
])
mdl.steps_order = ['step_bc', 'step_load']
# Summary
mdl.summary()
# Run
mdl.analyse_and_extract(software='opensees', fields=['u', 'ur', 'sf', 'sm'])
rhino.plot_data(mdl, step='step_load', field='um', radius=0.01, cbar_size=0.3)
rhino.plot_data(mdl, step='step_load', field='sf1', radius=0.01, cbar_size=0.3)
rhino.plot_data(mdl, step='step_load', field='sf2', radius=0.01, cbar_size=0.3)
rhino.plot_data(mdl, step='step_load', field='sm1', radius=0.01, cbar_size=0.3)
| 1.6875 | 2 |
zinc/route53/policy.py | PressLabs/zinc | 29 | 12766252 | import copy
from collections import OrderedDict
import zinc.route53
from zinc.utils import memoized_property
from .record import Record, RECORD_PREFIX
class Policy:
    """Route 53 view of a zinc routing policy.

    Wraps a database policy object and computes the set of AWS records that
    should exist for it inside ``self.zone``, reconciling AWS state with
    that desired state on demand.
    """
    def __init__(self, zone, policy):
        assert isinstance(zone, zinc.route53.Zone)
        self.zone = zone
        # Backing DB model; exposes name/id/routing/ttl/members.
        self.db_policy = policy
    @property
    def name(self):
        """Policy name (delegates to the DB model)."""
        return self.db_policy.name
    @property
    def id(self):
        """Policy primary key (delegates to the DB model)."""
        return self.db_policy.id
    @property
    def routing(self):
        """Routing mode; the code below handles 'latency' and 'weighted'."""
        return self.db_policy.routing
    @memoized_property
    def aws_records(self):
        """What we have in AWS"""
        # Filter the zone's full record map down to records owned by this
        # policy; computed once and cached by memoized_property.
        return dict([
            (r_id, record) for (r_id, record) in self.zone.records().items()
            if record.is_member_of(self)
        ])
    @memoized_property
    def desired_records(self):
        """The records we should have (the desired state of the world)"""
        return OrderedDict([(record.id, record) for record in self._build_tree()])
    def _build_weighted_tree(self, policy_members, region_suffixed=True):
        """Build one weighted A/AAAA record per policy member.

        :param policy_members: iterable of policy-member DB rows
        :param region_suffixed: when True, record names embed the member's
            region (used as the leaves of the latency tree); when False all
            members share one record name (plain weighted routing).
        """
        # Build simple tree
        records = []
        for policy_member in policy_members:
            record_type = 'A'
            # A ':' in the address means IPv6, hence an AAAA record.
            if ':' in policy_member.ip.ip:
                record_type = 'AAAA'
            health_check_kwa = {}
            if policy_member.ip.healthcheck_id:
                health_check_kwa['health_check_id'] = str(policy_member.ip.healthcheck_id)
            record = Record(
                ttl=self.db_policy.ttl,
                type=record_type,
                values=[policy_member.ip.ip],
                set_identifier='{}-{}'.format(str(policy_member.id), policy_member.region),
                weight=policy_member.weight,
                zone=self.zone,
                **health_check_kwa,
            )
            # TODO: maybe we should have a specialized subclass for PolicyRecords
            # and this logic should be moved there
            if region_suffixed:
                record.name = '{}_{}_{}'.format(RECORD_PREFIX, self.name, policy_member.region)
            else:
                record.name = '{}_{}'.format(RECORD_PREFIX, self.name)
            records.append(record)
        return records
    def _build_lbr_tree(self, policy_members, regions):
        """Build the latency-based-routing tree: region-suffixed weighted
        leaves plus one latency alias record per region (an A alias, and an
        AAAA alias when the region has IPv6 members)."""
        # Build latency based routed tree
        records = self._build_weighted_tree(policy_members)
        for region in regions:
            record = Record(
                name='{}_{}'.format(RECORD_PREFIX, self.name),
                type='A',
                alias_target={
                    'HostedZoneId': self.zone.id,
                    'DNSName': '{}_{}_{}.{}'.format(
                        RECORD_PREFIX, self.name, region, self.zone.root),
                    'EvaluateTargetHealth': True  # len(regions) > 1
                },
                region=region,
                set_identifier=region,
                zone=self.zone,
            )
            # Only emit the A alias when the region has IPv4 targets.
            if self._has_ipv4_records_in_region(policy_members, region):
                records.append(record)
            # create a similar AAAA record if there exists IPv6 ips in this region.
            if self._has_ipv6_records_in_region(policy_members, region):
                record = copy.copy(record)
                record.type = 'AAAA'
                records.append(record)
        return records
    def _build_tree(self):
        """Return the full desired record list for this policy.

        :raises Exception: when the policy has no enabled members.
        :raises AssertionError: on an unknown routing mode.
        """
        policy_members = self.db_policy.members.exclude(enabled=False).exclude(ip__enabled=False)
        # ensure we always build region subtrees in alphabetical order; makes tests simpler
        regions = sorted(set([pm.region for pm in policy_members]))
        if len(regions) == 0:
            raise Exception(
                "Policy can't be applied for zone '{}'; "
                "There is no member in the '{}' policy.".format(
                    self.zone, self
                )
            )
        if self.routing == 'latency':
            # Here is the case where are multiple regions
            records = self._build_lbr_tree(policy_members, regions=regions)
        # elif len(regions) == 1:
        elif self.routing == 'weighted':
            # Case with a single region
            records = self._build_weighted_tree(
                policy_members, region_suffixed=False)
        else:
            raise AssertionError('invalid routing {} for policy {}'.format(
                self.routing, self.db_policy))
        return records
    def reconcile(self):
        """Make AWS match ``desired_records``: delete records no longer
        desired, then upsert records that are missing or differ."""
        aws_record_ids = self.aws_records.keys()
        desired_record_ids = self.desired_records.keys()
        to_delete = []
        # Records present in AWS but absent from the desired set.
        for obsolete_rec_id in aws_record_ids - desired_record_ids:
            record = self.aws_records[obsolete_rec_id]
            record.deleted = True
            to_delete.append(record)
        self.zone.process_records(to_delete)
        to_upsert = []
        for rec_id, desired_record in self.desired_records.items():
            existing_record = self.aws_records.get(rec_id)
            if existing_record is None:
                to_upsert.append(desired_record)
            else:
                # if desired is a subset of existing
                if not desired_record.to_aws().items() <= existing_record.to_aws().items():
                    to_upsert.append(desired_record)
        self.zone.process_records(to_upsert)
    def remove(self):
        """Mark every AWS record owned by this policy deleted and push the change."""
        records = list(self.aws_records.values())
        for record in records:
            record.deleted = True
        self.zone.process_records(records)
    def _has_ipv6_records_in_region(self, policy_members, region):
        """Return True when any member in *region* (or in any region, if
        *region* is falsy) has an IPv6 address (contains ':')."""
        has_ipv6 = False
        for pm in policy_members:
            if region and pm.region != region:
                continue
            if ':' in pm.ip.ip:
                has_ipv6 = True
        return has_ipv6
    def _has_ipv4_records_in_region(self, policy_members, region):
        """Return True when any member in *region* (or in any region, if
        *region* is falsy) has an IPv4 address (contains '.')."""
        has_ipv4 = False
        for pm in policy_members:
            if region and pm.region != region:
                continue
            if '.' in pm.ip.ip:
                has_ipv4 = True
        return has_ipv4
| 2.65625 | 3 |
examples/tbss_tutorial.py | satra/NiPypeold | 0 | 12766253 | <filename>examples/tbss_tutorial.py
"""
XXX Currently not WORKING in release 0.3,
Check for updates
A pipeline example that uses several interfaces to
perform analysis on diffusion weighted images using
FSL fdt and tbss tools.
The data for this analysis is available at
http://www.mit.edu/~satra/nipype-nightly/users/pipeline_tutorial.html
"""
"""
1. Tell python where to find the appropriate functions.
"""
raise RuntimeWarning, 'tbss_tutorial currently not functional'
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import os # system functions
"""
1b. Confirm package dependencies are installed. (This is only for the
tutorial, rarely would you put this in your own code.)
"""
from nipype.utils.misc import package_check
package_check('numpy', '1.3', 'tutorial1')
package_check('scipy', '0.7', 'tutorial1')
package_check('networkx', '1.0', 'tutorial1')
package_check('IPython', '0.10', 'tutorial1')
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Setup for Tract-Based Spatial Statistics (TBSS) Computation
-----------------------------------------------------------
Here we will create a generic workflow for TBSS computation
"""
tbss_workflow = pe.Workflow(name='tbss')
tbss_workflow.base_dir=os.path.abspath('tbss_tutorial')
"""
collect all the FA images for each subject using the DataGrabber class
"""
subject_ids = [1260, 1549, 1636, 1651, 2078, 2378]
tbss_source = pe.Node(nio.DataGrabber(outfields=["fa_files"]),name="tbss_source")
tbss_source.inputs.base_directory = os.path.abspath('/media/sdb2/fsl_course/fsl_course_data/tbss/')
tbss_source.inputs.template = '%d.nii.gz'
tbss_source.inputs.template_args = dict(fa_files=[[subject_ids]])
"""
prepare your FA data in your TBSS working directory in the right format
"""
tbss1 = pe.Node(fsl.TBSS1Preproc(),name='tbss1')
"""
apply nonlinear registration of all FA images into standard space
"""
tbss2 = pe.Node(fsl.TBSS2Reg(),name='tbss2')
tbss2.inputs.FMRIB58FA=True
"""
create the mean FA image and skeletonise it
"""
tbss3 = pe.Node(fsl.TBSS3Postreg(),name='tbss3')
tbss3.inputs.FMRIB58FA=True
"""
project all subjects' FA data onto the mean FA skeleton
"""
tbss4 = pe.Node(fsl.TBSS4Prestats(),name='tbss4')
tbss4.inputs.threshold=0.3
"""
feed the 4D projected FA data into GLM modelling and thresholding
in order to find voxels which correlate with your model
"""
randomise = pe.Node(fsl.Randomise(),name='randomise')
#randomise.inputs.design_mat=os.path.abspath('data/design.mat')
#randomise.inputs.tcon=os.path.abspath('data/design.con')
randomise.inputs.num_perm=10
"""
Setup the pipeline that runs tbss
------------------
"""
tbss_workflow.connect([ (tbss_source,tbss1,[('fa_files','img_list')]),
(tbss1,tbss2,[('tbss_dir','tbss_dir')]),
(tbss2,tbss3,[('tbss_dir','tbss_dir')]),
(tbss3,tbss4,[('tbss_dir','tbss_dir')]),
(tbss4,randomise,[('all_FA_skeletonised','in_file')]),
(tbss4,randomise,[('mean_FA_skeleton_mask','mask')])
])
tbss_workflow.run()
tbss_workflow.write_graph()
| 2.109375 | 2 |
main.py | tiagobka/PythonBenchmark | 0 | 12766254 | <reponame>tiagobka/PythonBenchmark
import threading
import multiprocessing
import functools
from time import perf_counter, perf_counter_ns, sleep
from random import randint
""" PROGRAM CHALLENGE
Threads vs. Concurrent Processes
The challenge is to write a Python script to do some Benchmarking. You'll need to use some form of time to grab the time at the start of a code block and the end of the code block. Then, the difference between the two times shows us the speed of execution of this block.
The goal is to measure and compare two methodologies of spawning code to execute from the current script.
In the first block, have your code spawn four threads and measure their parallel execution. You can measure from start (before first thread started) to finish (after last thread is done). Each thread should only need to print something out, no need to get too crazy.
In the second block, use a different methodology to run four concurrent processes. These will be actual processes, so you have to learn about how to make the system call. Again, these processes should do the exact same thing as the four threads you created in block one. Again, measure start-to-finish of the four, and report the time.
Finally, print out the comparison of time used.
Questions: Let me know.
Consult the Internet. Consult books. Please do not consult people. The idea is to assess where you are with Python.
"""
# Wrapper function created to comply with DRY (Do not repeat yourself) principle
def benchmark(functionCallback):
    """Decorator that times a call to *functionCallback*.

    Prints the wall-clock execution time and returns a tuple of
    ``(execution_time_seconds, original_return_value)`` so callers keep
    access to the wrapped function's result.

    :param functionCallback: the callable to measure
    :return: the timing wrapper
    """
    @functools.wraps(functionCallback)
    def wrapArround(*args, **kwargs):
        # perf_counter gives a monotonic, high-resolution clock.
        started = perf_counter()
        result = functionCallback(*args, **kwargs)
        elapsed = perf_counter() - started
        print("Execution time in seconds: {:f} s".format(elapsed))
        return elapsed, result
    return wrapArround
def func_to_test(type, index, iterationsOrDelay, lock, mockTime: bool = False):
    """Workload executed by each thread/process in the benchmarks.

    Either prints *iterationsOrDelay* random integers, or (when *mockTime*
    is true) sleeps for *iterationsOrDelay* seconds to simulate work.

    :param type: label for log lines ("Process" or "Thread")
    :param index: index of the current worker
    :param iterationsOrDelay: iteration count, or sleep duration in mock mode
    :param lock: lock serialising print calls so lines don't interleave
    :param mockTime: True -> sleep instead of computing random integers
    :return: None
    """
    if not mockTime:
        for _ in range(iterationsOrDelay):
            value = randint(1, 999999)
            with lock:  # serialise output so each print stays on its own line
                print("{} {}: {}".format(type, index, value))
        return
    sleep(iterationsOrDelay)
    with lock:  # serialise output so each print stays on its own line
        print("Thread {}: random delay of {}".format(index, iterationsOrDelay))
@benchmark
def firstBlock(mockExecutionTime: bool = False):
    """Spawn four threads running func_to_test in parallel and wait for them.

    :param mockExecutionTime: when True each thread sleeps a random 1-5 s
        instead of computing 900 random integers; total time then tracks
        the slowest thread.
    :return: the string "Returned Some Value", demonstrating that the
        benchmark wrapper forwards return values
    """
    # Lock so a print statement cannot be interleaved by a context switch.
    printLock = threading.Lock()
    listOfThreads = []
    for i in range(4):
        # Pick the intended workload explicitly: the original combined the
        # delay and the iteration count with a bitwise OR (`delay | iterations`),
        # relying on one of them always being 0 -- obscure and fragile.
        workload = randint(1, 5) if mockExecutionTime else 900
        thread = threading.Thread(target=func_to_test,
                                  args=("Thread", i, workload, printLock),
                                  kwargs={'mockTime': mockExecutionTime})
        thread.start()
        listOfThreads.append(thread)
    # Block until every worker has finished.
    for thread in listOfThreads:
        thread.join()
    # Returned to demonstrate that the wrapper can forward return values.
    return "Returned Some Value"
@benchmark
def secondBlock(mockExecutionTime: bool = False):
    """Spawn four worker processes running func_to_test and wait for them.

    Mirrors firstBlock but uses real OS processes (multiprocessing), so the
    measured time includes process start-up overhead.

    :param mockExecutionTime: when True each process sleeps a random 1-5 s
        instead of computing 900 random integers
    :return: None
    """
    listOfProcesses = []
    # Lock shared across processes so print lines do not interleave.
    printLock = multiprocessing.Lock()
    for i in range(4):
        # Pick the intended workload explicitly: the original combined the
        # delay and the iteration count with a bitwise OR (`delay | iterations`),
        # relying on one of them always being 0 -- obscure and fragile.
        workload = randint(1, 5) if mockExecutionTime else 900
        process = multiprocessing.Process(target=func_to_test,
                                          args=("Process", i, workload, printLock),
                                          kwargs={'mockTime': mockExecutionTime})
        process.start()
        listOfProcesses.append(process)
    # Wait for every child process to exit.
    for process in listOfProcesses:
        process.join()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # This code creates 4 threads that print values to the console and then prints the total execution time
    # If you pass True as a parameter each thread will have a delay between 1 and 5 seconds.
    # In this case, the execution time should be similar to the thread with the largest delay.
    firstBlockExecTime, ret = firstBlock()
    # Proof that we still have access to the return value of the firstBlock function
    print(ret)
    secondBlockExecTime, _ = secondBlock()
    # Final side-by-side comparison of the two benchmark timings.
    print("-------------------------------------------\n"
          "Threads execution time in seconds: {:f} \n"
          "Processes execution time in seconds: {:f}".format(firstBlockExecTime, secondBlockExecTime))
| 3.671875 | 4 |
main.py | Eli-pixel/Etext | 1 | 12766255 | from ecolor import slow_color, slow_print, ecolor
ecolor("This is red text", "red")
ecolor("This is bold blue text", "bold_blue")
slow_print("This is slow_print", 0.025)
slow_color("This is slow_print but colorful", "blue", 0.025)
slow_color("This is slow_print but colorful and bold", "bold_blue", 0.025)
| 2.546875 | 3 |
src/babysteps/cal/u1272.py | solohm/multimeter | 1 | 12766256 | <gh_stars>1-10
#!/usr/bin/env python
# Python script to communicate with the
# Agilent U1253A / U1272A / U1273A etc.
# found originally on http://goo.gl/Gycv9H
# For more information on the protocol, check
# http://blog.philippklaus.de/2014/02/agilent-u1273a/
# and http://goo.gl/oIJi96
import sys
import time
import serial
class u1272:
    """Minimal serial driver for Agilent U1253A/U1272A/U1273A multimeters.

    Speaks the meter's SCPI-like text protocol over a serial port.
    NOTE(review): this file uses Python 2 syntax elsewhere; the str writes
    here assume a pyserial version that accepts str payloads -- on
    Python 3 they would need to be bytes.  Confirm before porting.
    """
    def __init__(self,com_port,baud=9600):
        # 0.1 s read timeout so a silent meter cannot block forever.
        self.port = serial.Serial(com_port, baud, timeout=.1)
    def idn(self):
        """Query and return the instrument identification string (*IDN?)."""
        self.port.write("*IDN?\n")
        return self.port.read(100).strip()
    def read(self,second='no'):
        """Fetch the current reading as a float.

        :param second: pass 'yes' to query the secondary display (FETC? @2).
        """
        if second == 'yes':
            self.port.write("FETC? @2\n")
        else:
            self.port.write("FETC?\n")
        # Read up to 17 bytes -- presumably the meter's fixed-width numeric
        # reply; TODO confirm against the protocol notes linked above.
        rv = self.port.read(17)
        rv = float(rv)
        return rv
    def close(self):
        """Release the serial port."""
        self.port.close()
if __name__ == '__main__':
    # Read port/baud from the command line; fall back to defaults when the
    # arguments are missing or the baud rate is not an integer.  The
    # original bare `except:` would also have hidden unrelated errors.
    try:
        port = sys.argv[1]
        baud = int(sys.argv[2])
    except (IndexError, ValueError):
        port = "/dev/ttyUSB0"
        baud = 9600
    dmm = u1272(port, baud)
    # print() call form is valid on Python 2 and 3 alike; the original
    # `print dmm.read()` statement is a SyntaxError under Python 3.
    print(dmm.read())
    # Release the serial port (the original leaked it on exit).
    dmm.close()
| 2.921875 | 3 |
application/train_utils.py | surajpaib/Size-Matters | 0 | 12766257 | <filename>application/train_utils.py
import torch
def train(mod, optim, criterion, epoch, train_loader):
    """Run one training epoch of *mod* over *train_loader*.

    :param mod: model exposing train() and __call__ (e.g. a torch.nn.Module)
    :param optim: optimizer whose zero_grad()/step() drive the update
    :param criterion: loss function mapping (output, target) to a scalar loss
    :param epoch: epoch number, used only in the progress log line
    :param train_loader: iterable of (data, target) batches with a .dataset
    """
    mod.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Standard step: clear grads, forward, compute loss, backward, update.
        optim.zero_grad()
        output = mod(data)
        loss = criterion(output, target)
        loss.backward()
        optim.step()
        # Log progress every 5 batches.
        if batch_idx % 5 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(mod, criterion, test_loader):
    """Evaluate *mod* on *test_loader*; print average loss and accuracy.

    Runs with gradients disabled.  NOTE(review): the accumulated loss is
    divided by the dataset size, which assumes the criterion returns a
    per-batch sum (or that batches have size 1) -- confirm the intended
    reduction mode of *criterion*.
    """
    mod.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = mod(data)
            test_loss += criterion(output, target).item()
            # Predicted class = index of the max value along dim 1.
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
class ReshapeTransform:
    """Callable dataset transform that reshapes each tensor to a fixed size."""
    def __init__(self, new_size):
        # Target shape applied to every tensor passed through the transform.
        self.new_size = new_size
    def __call__(self, img):
        """Return *img* reshaped to ``self.new_size``."""
        return torch.reshape(img, self.new_size)
| 2.5625 | 3 |
tccli/services/rum/rum_client.py | tencentcloudapi-test/tencentcloud-cli | 0 | 12766258 | # -*- coding: utf-8 -*-
import os
import sys
import six
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli import __version__
from tccli.utils import Utils
from tccli.exceptions import ConfigurationError, ClientError, ParamError
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.rum.v20210622 import rum_client as rum_client_v20210622
from tencentcloud.rum.v20210622 import models as models_v20210622
from jmespath import search
import time
def doDescribeTawAreas(args, parsed_globals):
    """CLI handler for the rum DescribeTawAreas action (generated boilerplate).

    Builds credentials and a client from the parsed global options, issues
    the request (re-polling when a --waiter condition is configured) and
    prints the JSON response.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > assumed STS role > static secret keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/model modules matching the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeTawAreasRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    # Without --waiter this loop runs exactly once; with it, the request is
    # repeated until the JMESPath expression matches or the timeout expires.
    while True:
        rsp = client.DescribeTawAreas(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError as e:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateReleaseFile(args, parsed_globals):
    """CLI handler for the rum CreateReleaseFile action (generated boilerplate).

    Builds credentials and a client from the parsed global options, issues
    the request (re-polling when a --waiter condition is configured) and
    prints the JSON response.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > assumed STS role > static secret keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/model modules matching the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateReleaseFileRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    # Without --waiter this loop runs exactly once; with it, the request is
    # repeated until the JMESPath expression matches or the timeout expires.
    while True:
        rsp = client.CreateReleaseFile(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError as e:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataLogUrlInfo(args, parsed_globals):
    """CLI handler for the rum DescribeDataLogUrlInfo action (generated boilerplate).

    Builds credentials and a client from the parsed global options, issues
    the request (re-polling when a --waiter condition is configured) and
    prints the JSON response.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > assumed STS role > static secret keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/model modules matching the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataLogUrlInfoRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    # Without --waiter this loop runs exactly once; with it, the request is
    # repeated until the JMESPath expression matches or the timeout expires.
    while True:
        rsp = client.DescribeDataLogUrlInfo(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError as e:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTawInstances(args, parsed_globals):
    """CLI handler for the rum DescribeTawInstances action (generated boilerplate).

    Builds credentials and a client from the parsed global options, issues
    the request (re-polling when a --waiter condition is configured) and
    prints the JSON response.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > assumed STS role > static secret keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/model modules matching the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeTawInstancesRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    # Without --waiter this loop runs exactly once; with it, the request is
    # repeated until the JMESPath expression matches or the timeout expires.
    while True:
        rsp = client.DescribeTawInstances(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError as e:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataPerformancePage(args, parsed_globals):
    """CLI handler for the rum DescribeDataPerformancePage action (generated boilerplate).

    Builds credentials and a client from the parsed global options, issues
    the request (re-polling when a --waiter condition is configured) and
    prints the JSON response.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > assumed STS role > static secret keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/model modules matching the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataPerformancePageRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    # Without --waiter this loop runs exactly once; with it, the request is
    # repeated until the JMESPath expression matches or the timeout expires.
    while True:
        rsp = client.DescribeDataPerformancePage(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError as e:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataLogUrlStatistics(args, parsed_globals):
    """CLI handler for the rum DescribeDataLogUrlStatistics action (generated boilerplate).

    Builds credentials and a client from the parsed global options, issues
    the request (re-polling when a --waiter condition is configured) and
    prints the JSON response.
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > assumed STS role > static secret keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # Select the client/model modules matching the requested API version.
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataLogUrlStatisticsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    # Without --waiter this loop runs exactly once; with it, the request is
    # repeated until the JMESPath expression matches or the timeout expires.
    while True:
        rsp = client.DescribeDataLogUrlStatistics(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError as e:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataFetchProject(args, parsed_globals):
    """Execute the RUM ``DescribeDataFetchProject`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeDataFetchProjectRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeDataFetchProject(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDeleteInstance(args, parsed_globals):
    """Execute the RUM ``DeleteInstance`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DeleteInstanceRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DeleteInstance(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeDataStaticUrl(args, parsed_globals):
    """Execute the RUM ``DescribeDataStaticUrl`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeDataStaticUrlRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeDataStaticUrl(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doResumeInstance(args, parsed_globals):
    """Execute the RUM ``ResumeInstance`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].ResumeInstanceRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.ResumeInstance(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeDataPerformanceProject(args, parsed_globals):
    """Execute the RUM ``DescribeDataPerformanceProject`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeDataPerformanceProjectRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeDataPerformanceProject(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeError(args, parsed_globals):
    """Execute the RUM ``DescribeError`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeErrorRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeError(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLogList(args, parsed_globals):
    """Execute the RUM ``DescribeLogList`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeLogListRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeLogList(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeOfflineLogs(args, parsed_globals):
    """Execute the RUM ``DescribeOfflineLogs`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeOfflineLogsRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeOfflineLogs(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doCreateTawInstance(args, parsed_globals):
    """Execute the RUM ``CreateTawInstance`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].CreateTawInstanceRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.CreateTawInstance(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribePvList(args, parsed_globals):
    """Execute the RUM ``DescribePvList`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribePvListRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribePvList(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeLogExports(args, parsed_globals):
    """Execute the RUM ``DescribeLogExports`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeLogExportsRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeLogExports(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeDataWebVitalsPage(args, parsed_globals):
    """Execute the RUM ``DescribeDataWebVitalsPage`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeDataWebVitalsPageRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeDataWebVitalsPage(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDeleteStarProject(args, parsed_globals):
    """Execute the RUM ``DeleteStarProject`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DeleteStarProjectRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DeleteStarProject(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeDataFetchUrlInfo(args, parsed_globals):
    """Execute the RUM ``DescribeDataFetchUrlInfo`` call for the CLI.

    Builds credentials and an HTTP profile from the parsed global options,
    sends the request built from ``args``, and prints the JSON result. When
    a waiter is configured, the request is retried until the waiter's
    JMESPath condition matches or its timeout elapses.
    """
    params = parse_global_arg(parsed_globals)

    # Credential precedence: CVM role > STS assume-role > static secret keys.
    use_cvm_role = params[OptionsDefine.UseCVMRole.replace('-', '_')]
    role_arn = params[OptionsDefine.RoleArn.replace('-', '_')]
    role_session = params[OptionsDefine.RoleSessionName.replace('-', '_')]
    if use_cvm_role:
        creds = credential.CVMRoleCredential()
    elif role_arn and role_session:
        creds = credential.STSAssumeRoleCredential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            role_arn, role_session
        )
    else:
        creds = credential.Credential(
            params[OptionsDefine.SecretId], params[OptionsDefine.SecretKey],
            params[OptionsDefine.Token]
        )

    timeout = params[OptionsDefine.Timeout]
    http_conf = HttpProfile(
        reqTimeout=int(timeout) if timeout is not None else 60,
        reqMethod="POST",
        endpoint=params[OptionsDefine.Endpoint],
        proxy=params[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    client_profile = ClientProfile(httpProfile=http_conf, signMethod="HmacSHA256")
    api_version = params[OptionsDefine.Version]
    rum_client = CLIENT_MAP[api_version].RumClient(creds, params[OptionsDefine.Region], client_profile)
    rum_client._sdkVersion += ("_CLI_" + __version__)

    req = MODELS_MAP[api_version].DescribeDataFetchUrlInfoRequest()
    req.from_json_string(json.dumps(args))
    began = time.time()
    while True:
        response = rum_client.DescribeDataFetchUrlInfo(req)
        payload = response.to_json_string()
        try:
            parsed = json.loads(payload)
        except TypeError:
            parsed = json.loads(payload.decode('utf-8'))  # python3.3
        if not params[OptionsDefine.Waiter]:
            break
        # NOTE(review): the quoted literal key 'OptionsDefine.WaiterInfo'
        # (not the OptionsDefine constant) appears to match what
        # parse_global_arg stores -- confirm before "fixing" it.
        waiter = params['OptionsDefine.WaiterInfo']
        observed = search(waiter['expr'], parsed)
        if observed == waiter['to']:
            break
        if time.time() - began >= waiter['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter['expr'], waiter['to'], observed))
        print('Inquiry result is %s.' % observed)
        time.sleep(waiter['interval'])
    FormatOutput.output("action", parsed, params[OptionsDefine.Output], params[OptionsDefine.Filter])
def doDescribeDataPvUrlStatistics(args, parsed_globals):
    """Invoke the RUM DescribeDataPvUrlStatistics API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataPvUrlStatisticsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataPvUrlStatistics(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeData(args, parsed_globals):
    """Invoke the RUM DescribeData API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeData(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeReleaseFileSign(args, parsed_globals):
    """Invoke the RUM DescribeReleaseFileSign API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeReleaseFileSignRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeReleaseFileSign(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateLogExport(args, parsed_globals):
    """Invoke the RUM CreateLogExport API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateLogExportRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateLogExport(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataSetUrlStatistics(args, parsed_globals):
    """Invoke the RUM DescribeDataSetUrlStatistics API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataSetUrlStatisticsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataSetUrlStatistics(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataStaticResource(args, parsed_globals):
    """Invoke the RUM DescribeDataStaticResource API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataStaticResourceRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataStaticResource(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeProjectLimits(args, parsed_globals):
    """Invoke the RUM DescribeProjectLimits API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeProjectLimitsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeProjectLimits(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataCustomUrl(args, parsed_globals):
    """Invoke the RUM DescribeDataCustomUrl API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataCustomUrlRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataCustomUrl(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateWhitelist(args, parsed_globals):
    """Invoke the RUM CreateWhitelist API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateWhitelistRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateWhitelist(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeProjects(args, parsed_globals):
    """Invoke the RUM DescribeProjects API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeProjectsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeProjects(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateStarProject(args, parsed_globals):
    """Invoke the RUM CreateStarProject API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateStarProjectRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateStarProject(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteWhitelist(args, parsed_globals):
    """Invoke the RUM DeleteWhitelist API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteWhitelistRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteWhitelist(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyProjectLimit(args, parsed_globals):
    """Invoke the RUM ModifyProjectLimit API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyProjectLimitRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.ModifyProjectLimit(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doStopInstance(args, parsed_globals):
    """Invoke the RUM StopInstance API and print the response.

    args: dict of API request parameters parsed from the command line.
    parsed_globals: global CLI options (credentials, endpoint, output, waiter, ...).
    Raises ClientError when a --waiter condition is not met before its timeout.
    """
    g_param = parse_global_arg(parsed_globals)

    # Resolve credentials: CVM role > STS assume-role > static secret id/key.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.StopInstanceRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.StopInstance(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            json_obj = json.loads(result.decode('utf-8'))  # python3.3
        # BUG FIX: WaiterInfo was previously looked up with the string literal
        # 'OptionsDefine.WaiterInfo' instead of the OptionsDefine.WaiterInfo
        # constant (as used for every other option), raising KeyError whenever
        # --waiter was supplied.
        if not g_param[OptionsDefine.Waiter] or search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj) == g_param[OptionsDefine.WaiterInfo]['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param[OptionsDefine.WaiterInfo]['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param[OptionsDefine.WaiterInfo]['expr'], g_param[OptionsDefine.WaiterInfo]['to'],
            search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param[OptionsDefine.WaiterInfo]['expr'], json_obj))
        time.sleep(g_param[OptionsDefine.WaiterInfo]['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyProject(args, parsed_globals):
    """CLI handler for the RUM ModifyProject action.

    Builds credentials from the parsed global options, sends a
    ModifyProject request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyProjectRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.ModifyProject(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteReleaseFile(args, parsed_globals):
    """CLI handler for the RUM DeleteReleaseFile action.

    Builds credentials from the parsed global options, sends a
    DeleteReleaseFile request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteReleaseFileRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteReleaseFile(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLogExport(args, parsed_globals):
    """CLI handler for the RUM DeleteLogExport action.

    Builds credentials from the parsed global options, sends a
    DeleteLogExport request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteLogExportRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteLogExport(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeWhitelists(args, parsed_globals):
    """CLI handler for the RUM DescribeWhitelists action.

    Builds credentials from the parsed global options, sends a
    DescribeWhitelists request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeWhitelistsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeWhitelists(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataEventUrl(args, parsed_globals):
    """CLI handler for the RUM DescribeDataEventUrl action.

    Builds credentials from the parsed global options, sends a
    DescribeDataEventUrl request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataEventUrlRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataEventUrl(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteOfflineLogRecord(args, parsed_globals):
    """CLI handler for the RUM DeleteOfflineLogRecord action.

    Builds credentials from the parsed global options, sends a
    DeleteOfflineLogRecord request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteOfflineLogRecordRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteOfflineLogRecord(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeOfflineLogConfigs(args, parsed_globals):
    """CLI handler for the RUM DescribeOfflineLogConfigs action.

    Builds credentials from the parsed global options, sends a
    DescribeOfflineLogConfigs request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeOfflineLogConfigsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeOfflineLogConfigs(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeScores(args, parsed_globals):
    """CLI handler for the RUM DescribeScores action.

    Builds credentials from the parsed global options, sends a
    DescribeScores request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeScoresRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeScores(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateProject(args, parsed_globals):
    """CLI handler for the RUM CreateProject action.

    Builds credentials from the parsed global options, sends a
    CreateProject request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateProjectRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateProject(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataReportCount(args, parsed_globals):
    """CLI handler for the RUM DescribeDataReportCount action.

    Builds credentials from the parsed global options, sends a
    DescribeDataReportCount request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataReportCountRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataReportCount(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataPvUrlInfo(args, parsed_globals):
    """CLI handler for the RUM DescribeDataPvUrlInfo action.

    Builds credentials from the parsed global options, sends a
    DescribeDataPvUrlInfo request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataPvUrlInfoRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataPvUrlInfo(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataStaticProject(args, parsed_globals):
    """CLI handler for the RUM DescribeDataStaticProject action.

    Builds credentials from the parsed global options, sends a
    DescribeDataStaticProject request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataStaticProjectRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataStaticProject(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteProject(args, parsed_globals):
    """CLI handler for the RUM DeleteProject action.

    Builds credentials from the parsed global options, sends a
    DeleteProject request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteProjectRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteProject(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeOfflineLogRecords(args, parsed_globals):
    """CLI handler for the RUM DescribeOfflineLogRecords action.

    Builds credentials from the parsed global options, sends a
    DescribeOfflineLogRecords request built from *args*, and prints the JSON
    response.  When the waiter option is supplied, the request is
    retried until the waiter expression matches the expected value
    or the waiter timeout elapses (raising ClientError).
    """
    g_param = parse_global_arg(parsed_globals)
    # Credential precedence: CVM role > STS assume-role > static secret pair.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        # Default request timeout is 60 seconds unless overridden.
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeOfflineLogRecordsRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeOfflineLogRecords(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # Some Python versions return bytes from to_json_string().
            json_obj = json.loads(result.decode('utf-8'))
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        # Waiter enabled and condition not met yet: retry until timeout.
        waiter_info = g_param['OptionsDefine.WaiterInfo']
        if time.time() - start_time >= waiter_info['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
                              (waiter_info['expr'], waiter_info['to'],
                               search(waiter_info['expr'], json_obj)))
        print('Inquiry result is %s.' % search(waiter_info['expr'], json_obj))
        time.sleep(waiter_info['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeUvList(args, parsed_globals):
    """Handle the DescribeUvList CLI action.

    Builds credentials, sends the request, optionally polls until the
    --waiter condition is met, then prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential resolution priority: CVM role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeUvListRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeUvList(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # On some interpreters (e.g. Python 3.3) the result is bytes.
            json_obj = json.loads(result.decode('utf-8'))
        # Without --waiter, or once the waited-for expression matches, stop polling.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteOfflineLogConfig(args, parsed_globals):
    """Handle the DeleteOfflineLogConfig CLI action.

    Builds credentials, sends the request, optionally polls until the
    --waiter condition is met, then prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential resolution priority: CVM role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DeleteOfflineLogConfigRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DeleteOfflineLogConfig(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # On some interpreters (e.g. Python 3.3) the result is bytes.
            json_obj = json.loads(result.decode('utf-8'))
        # Without --waiter, or once the waited-for expression matches, stop polling.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeReleaseFiles(args, parsed_globals):
    """Handle the DescribeReleaseFiles CLI action.

    Builds credentials, sends the request, optionally polls until the
    --waiter condition is met, then prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential resolution priority: CVM role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeReleaseFilesRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeReleaseFiles(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # On some interpreters (e.g. Python 3.3) the result is bytes.
            json_obj = json.loads(result.decode('utf-8'))
        # Without --waiter, or once the waited-for expression matches, stop polling.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyInstance(args, parsed_globals):
    """Handle the ModifyInstance CLI action.

    Builds credentials, sends the request, optionally polls until the
    --waiter condition is met, then prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential resolution priority: CVM role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.ModifyInstanceRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.ModifyInstance(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # On some interpreters (e.g. Python 3.3) the result is bytes.
            json_obj = json.loads(result.decode('utf-8'))
        # Without --waiter, or once the waited-for expression matches, stop polling.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeDataFetchUrl(args, parsed_globals):
    """Handle the DescribeDataFetchUrl CLI action.

    Builds credentials, sends the request, optionally polls until the
    --waiter condition is met, then prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential resolution priority: CVM role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.DescribeDataFetchUrlRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.DescribeDataFetchUrl(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # On some interpreters (e.g. Python 3.3) the result is bytes.
            json_obj = json.loads(result.decode('utf-8'))
        # Without --waiter, or once the waited-for expression matches, stop polling.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateOfflineLogConfig(args, parsed_globals):
    """Handle the CreateOfflineLogConfig CLI action.

    Builds credentials, sends the request, optionally polls until the
    --waiter condition is met, then prints the formatted response.
    """
    g_param = parse_global_arg(parsed_globals)

    # Credential resolution priority: CVM role > STS assume-role > static keys.
    if g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
        cred = credential.CVMRoleCredential()
    elif g_param[OptionsDefine.RoleArn.replace('-', '_')] and g_param[OptionsDefine.RoleSessionName.replace('-', '_')]:
        cred = credential.STSAssumeRoleCredential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.RoleArn.replace('-', '_')],
            g_param[OptionsDefine.RoleSessionName.replace('-', '_')]
        )
    else:
        cred = credential.Credential(
            g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey], g_param[OptionsDefine.Token]
        )
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint],
        proxy=g_param[OptionsDefine.HttpsProxy.replace('-', '_')]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
    client = mod.RumClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    models = MODELS_MAP[g_param[OptionsDefine.Version]]
    model = models.CreateOfflineLogConfigRequest()
    model.from_json_string(json.dumps(args))
    start_time = time.time()
    while True:
        rsp = client.CreateOfflineLogConfig(model)
        result = rsp.to_json_string()
        try:
            json_obj = json.loads(result)
        except TypeError:
            # On some interpreters (e.g. Python 3.3) the result is bytes.
            json_obj = json.loads(result.decode('utf-8'))
        # Without --waiter, or once the waited-for expression matches, stop polling.
        if not g_param[OptionsDefine.Waiter] or search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj) == g_param['OptionsDefine.WaiterInfo']['to']:
            break
        cur_time = time.time()
        if cur_time - start_time >= g_param['OptionsDefine.WaiterInfo']['timeout']:
            raise ClientError('Request timeout, wait `%s` to `%s` timeout, last request is %s' %
            (g_param['OptionsDefine.WaiterInfo']['expr'], g_param['OptionsDefine.WaiterInfo']['to'],
            search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj)))
        else:
            print('Inquiry result is %s.' % search(g_param['OptionsDefine.WaiterInfo']['expr'], json_obj))
        time.sleep(g_param['OptionsDefine.WaiterInfo']['interval'])
    FormatOutput.output("action", json_obj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
# Map an API version string to the generated client / model modules.
CLIENT_MAP = {
    "v20210622": rum_client_v20210622,
}

MODELS_MAP = {
    "v20210622": models_v20210622,
}
# Map a CLI action name to its handler function (exposed via action_caller()).
ACTION_MAP = {
    "DescribeTawAreas": doDescribeTawAreas,
    "CreateReleaseFile": doCreateReleaseFile,
    "DescribeDataLogUrlInfo": doDescribeDataLogUrlInfo,
    "DescribeTawInstances": doDescribeTawInstances,
    "DescribeDataPerformancePage": doDescribeDataPerformancePage,
    "DescribeDataLogUrlStatistics": doDescribeDataLogUrlStatistics,
    "DescribeDataFetchProject": doDescribeDataFetchProject,
    "DeleteInstance": doDeleteInstance,
    "DescribeDataStaticUrl": doDescribeDataStaticUrl,
    "ResumeInstance": doResumeInstance,
    "DescribeDataPerformanceProject": doDescribeDataPerformanceProject,
    "DescribeError": doDescribeError,
    "DescribeLogList": doDescribeLogList,
    "DescribeOfflineLogs": doDescribeOfflineLogs,
    "CreateTawInstance": doCreateTawInstance,
    "DescribePvList": doDescribePvList,
    "DescribeLogExports": doDescribeLogExports,
    "DescribeDataWebVitalsPage": doDescribeDataWebVitalsPage,
    "DeleteStarProject": doDeleteStarProject,
    "DescribeDataFetchUrlInfo": doDescribeDataFetchUrlInfo,
    "DescribeDataPvUrlStatistics": doDescribeDataPvUrlStatistics,
    "DescribeData": doDescribeData,
    "DescribeReleaseFileSign": doDescribeReleaseFileSign,
    "CreateLogExport": doCreateLogExport,
    "DescribeDataSetUrlStatistics": doDescribeDataSetUrlStatistics,
    "DescribeDataStaticResource": doDescribeDataStaticResource,
    "DescribeProjectLimits": doDescribeProjectLimits,
    "DescribeDataCustomUrl": doDescribeDataCustomUrl,
    "CreateWhitelist": doCreateWhitelist,
    "DescribeProjects": doDescribeProjects,
    "CreateStarProject": doCreateStarProject,
    "DeleteWhitelist": doDeleteWhitelist,
    "ModifyProjectLimit": doModifyProjectLimit,
    "StopInstance": doStopInstance,
    "ModifyProject": doModifyProject,
    "DeleteReleaseFile": doDeleteReleaseFile,
    "DeleteLogExport": doDeleteLogExport,
    "DescribeWhitelists": doDescribeWhitelists,
    "DescribeDataEventUrl": doDescribeDataEventUrl,
    "DeleteOfflineLogRecord": doDeleteOfflineLogRecord,
    "DescribeOfflineLogConfigs": doDescribeOfflineLogConfigs,
    "DescribeScores": doDescribeScores,
    "CreateProject": doCreateProject,
    "DescribeDataReportCount": doDescribeDataReportCount,
    "DescribeDataPvUrlInfo": doDescribeDataPvUrlInfo,
    "DescribeDataStaticProject": doDescribeDataStaticProject,
    "DeleteProject": doDeleteProject,
    "DescribeOfflineLogRecords": doDescribeOfflineLogRecords,
    "DescribeUvList": doDescribeUvList,
    "DeleteOfflineLogConfig": doDeleteOfflineLogConfig,
    "DescribeReleaseFiles": doDescribeReleaseFiles,
    "ModifyInstance": doModifyInstance,
    "DescribeDataFetchUrl": doDescribeDataFetchUrl,
    "CreateOfflineLogConfig": doCreateOfflineLogConfig,
}
# API versions this CLI module supports; validated in parse_global_arg().
AVAILABLE_VERSION_LIST = [
    "v20210622",
]
def action_caller():
    """Return the mapping from CLI action names to their handler functions."""
    return ACTION_MAP
def parse_global_arg(parsed_globals):
    """Normalize global CLI arguments into a single parameter dict.

    Fills missing credentials/config from the profile files under ~/.tccli
    (or from environment variables when no --profile was given), resolves
    the API version and endpoint, and parses the --waiter option into
    g_param['OptionsDefine.WaiterInfo'].

    Raises ConfigurationError for malformed/missing configuration and a
    plain Exception for an unsupported version or malformed --waiter spec.
    """
    g_param = parsed_globals
    is_exist_profile = True
    if not parsed_globals["profile"]:
        is_exist_profile = False
        g_param["profile"] = "default"

    configure_path = os.path.join(os.path.expanduser("~"), ".tccli")
    is_conf_exist, conf_path = Utils.file_existed(configure_path, g_param["profile"] + ".configure")
    is_cred_exist, cred_path = Utils.file_existed(configure_path, g_param["profile"] + ".credential")

    conf = {}
    cred = {}
    if is_conf_exist:
        conf = Utils.load_json_msg(conf_path)
    if is_cred_exist:
        cred = Utils.load_json_msg(cred_path)
    if not (isinstance(conf, dict) and isinstance(cred, dict)):
        raise ConfigurationError(
            "file: %s or %s is not json format"
            % (g_param["profile"] + ".configure", g_param["profile"] + ".credential"))

    if OptionsDefine.Token not in cred:
        cred[OptionsDefine.Token] = None

    # Without an explicit --profile, environment variables override file config.
    if not is_exist_profile:
        if os.environ.get(OptionsDefine.ENV_SECRET_ID) and os.environ.get(OptionsDefine.ENV_SECRET_KEY):
            cred[OptionsDefine.SecretId] = os.environ.get(OptionsDefine.ENV_SECRET_ID)
            cred[OptionsDefine.SecretKey] = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
            cred[OptionsDefine.Token] = os.environ.get(OptionsDefine.ENV_TOKEN)
        if os.environ.get(OptionsDefine.ENV_REGION):
            conf[OptionsDefine.Region] = os.environ.get(OptionsDefine.ENV_REGION)
        if os.environ.get(OptionsDefine.ENV_ROLE_ARN) and os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME):
            cred[OptionsDefine.RoleArn] = os.environ.get(OptionsDefine.ENV_ROLE_ARN)
            cred[OptionsDefine.RoleSessionName] = os.environ.get(OptionsDefine.ENV_ROLE_SESSION_NAME)

    # Fill any still-missing global parameters from the loaded cred/conf dicts.
    for param in g_param.keys():
        if g_param[param] is None:
            if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId, OptionsDefine.Token]:
                if param in cred:
                    g_param[param] = cred[param]
                elif not g_param[OptionsDefine.UseCVMRole.replace('-', '_')]:
                    raise ConfigurationError("%s is invalid" % param)
            elif param in [OptionsDefine.Region, OptionsDefine.Output]:
                if param in conf:
                    g_param[param] = conf[param]
                else:
                    raise ConfigurationError("%s is invalid" % param)
            elif param.replace('_', '-') in [OptionsDefine.RoleArn, OptionsDefine.RoleSessionName]:
                if param.replace('_', '-') in cred:
                    g_param[param] = cred[param.replace('_', '-')]

    try:
        if g_param[OptionsDefine.ServiceVersion]:
            g_param[OptionsDefine.Version] = "v" + g_param[OptionsDefine.ServiceVersion].replace('-', '')
        else:
            version = conf["rum"][OptionsDefine.Version]
            g_param[OptionsDefine.Version] = "v" + version.replace('-', '')

        if g_param[OptionsDefine.Endpoint] is None:
            g_param[OptionsDefine.Endpoint] = conf["rum"][OptionsDefine.Endpoint]
    except Exception as err:
        raise ConfigurationError("config file:%s error, %s" % (conf_path, str(err)))

    if g_param[OptionsDefine.Version] not in AVAILABLE_VERSION_LIST:
        raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))

    if g_param[OptionsDefine.Waiter]:
        # SECURITY NOTE: eval() on the user-supplied --waiter string; a safer
        # choice would be ast.literal_eval, kept as-is to preserve behavior.
        param = eval(g_param[OptionsDefine.Waiter])
        if 'expr' not in param:
            raise Exception('`expr` in `--waiter` must be defined')
        if 'to' not in param:
            raise Exception('`to` in `--waiter` must be defined')
        if 'timeout' not in param:
            if 'waiter' in conf and 'timeout' in conf['waiter']:
                param['timeout'] = conf['waiter']['timeout']
            else:
                param['timeout'] = 180
        if 'interval' not in param:
            if 'waiter' in conf and 'interval' in conf['waiter']:
                param['interval'] = conf['waiter']['interval']
            else:
                # BUGFIX: previously assigned param['timeout'] = 5 here, which
                # both clobbered the timeout and left 'interval' unset, making
                # the min() below raise KeyError.
                param['interval'] = 5
        param['interval'] = min(param['interval'], param['timeout'])
        g_param['OptionsDefine.WaiterInfo'] = param

    # json.load on Python 2 yields unicode values; convert them to byte strings.
    if six.PY2:
        for key, value in g_param.items():
            if isinstance(value, six.text_type):
                g_param[key] = value.encode('utf-8')
    return g_param
| 1.695313 | 2 |
cmskit/dealers/urls.py | ozgurgunes/django-cmskit | 1 | 12766259 | <filename>cmskit/dealers/urls.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
# URL routes for the dealers app. Uses the legacy Django (<1.8) `patterns()`
# API, where view callables are given as strings relative to the prefix
# 'cmskit.dealers.views'.
urlpatterns = patterns('cmskit.dealers.views',
    # url(r'^$', 'index', name='articles_index'),
    url(r'^search/$', 'search', name='dealers_search'),
    url(r'^(?P<pk>\d+)/$', 'dealer', name='dealers_dealer'),
)
extra/neurodsp/neurodsp/shape/phase.py | davidcrowland/layer_vb_tagging | 0 | 12766260 | """
phase.py
Estimate the phase of an oscillation using a waveform-based approach
"""
import numpy as np
def extrema_interpolated_phase(x, Ps, Ts, zeroxR=None, zeroxD=None):
    """
    Use peaks (phase 0) and troughs (phase pi/-pi) to estimate
    instantaneous phase. Also use rise and decay zerocrossings
    (phase -pi/2 and pi/2, respectively) if specified.

    Parameters
    ----------
    x : array-like 1d
        voltage time series
    Ps : array-like 1d
        time points of oscillatory peaks
    Ts : array-like 1d
        time points of oscillatory troughs
    zeroxR : array-like 1d
        indices at which oscillatory rising zerocrossings occur
    zeroxD : array-like 1d
        indices at which oscillatory decaying zerocrossings occur

    Returns
    -------
    pha : array-like 1d
        instantaneous phase

    Notes
    -----
    Sometimes, due to noise, extrema and zerocrossing estimation
    is poor, and for example, the same index may be assigned to
    both a peak and a decaying zerocrossing. Because of this,
    we first assign phase values by zerocrossings, and then
    may overwrite them with extrema phases.
    """
    # Initialize phase arrays
    # 2 phase arrays: trough pi and trough -pi
    L = len(x)
    t = np.arange(L)
    pha_tpi = np.zeros(L) * np.nan
    pha_tnpi = np.zeros(L) * np.nan

    # If specified, assign phases to zerocrossings
    if zeroxR is not None:
        pha_tpi[zeroxR] = -np.pi / 2
        pha_tnpi[zeroxR] = -np.pi / 2
    if zeroxD is not None:
        pha_tpi[zeroxD] = np.pi / 2
        pha_tnpi[zeroxD] = np.pi / 2

    # Define phases at the extrema; these overwrite any zerocrossing value
    # assigned to the same sample (see Notes)
    pha_tpi[Ps] = 0
    pha_tpi[Ts] = np.pi
    pha_tnpi[Ps] = 0
    pha_tnpi[Ts] = -np.pi

    # Linearly interpolate between the known (non-NaN) phase anchors
    pha_tpi = np.interp(t, t[~np.isnan(pha_tpi)], pha_tpi[~np.isnan(pha_tpi)])
    pha_tnpi = np.interp(t, t[~np.isnan(pha_tnpi)], pha_tnpi[~np.isnan(pha_tnpi)])

    # For the phase time series in which the trough is negative pi:
    # Replace the decaying periods with these periods in the phase time
    # series in which the trough is pi
    diffs = np.diff(pha_tnpi)
    diffs = np.append(diffs, 99)  # sentinel so the last sample is kept as-is
    pha_tnpi[diffs < 0] = pha_tpi[diffs < 0]

    # Assign the periods before the first empirical phase timepoint to NaN
    # NOTE(review): next() raises StopIteration if no positive diff exists
    # (e.g. no extrema supplied) — confirm callers guarantee at least one.
    diffs = np.diff(pha_tnpi)
    first_empirical_idx = next(i for i, xi in enumerate(diffs) if xi > 0)
    pha_tnpi[:first_empirical_idx] = np.nan

    # Assign the periods after the last empirical phase timepoint to NaN
    # NOTE(review): when last_empirical_idx <= 1, the slice start becomes 0 or 1
    # and NaNs out nearly the whole array — confirm this edge case is intended.
    diffs = np.diff(pha_tnpi)
    last_empirical_idx = next(i for i, xi in enumerate(diffs[::-1]) if xi > 0)
    pha_tnpi[-last_empirical_idx + 1:] = np.nan

    return pha_tnpi
| 3.234375 | 3 |
simulator/generators/DtnMarkovBundleGenerator.py | seakers/dtnsim | 4 | 12766261 | # -*- coding: utf-8 -*-
import os
from collections import defaultdict
from copy import deepcopy
from warnings import warn
import numpy as np
import pandas as pd
from pathlib import Path
from simulator.core.DtnBundle import Bundle
from simulator.utils.DtnIO import load_traffic_file
from simulator.utils.DtnUtils import shift_traffic
from simulator.generators.DtnAbstractGenerator import DtnAbstractGenerator
# ============================================================================================================
# === DEFINE LATENCY CATEGORIES - THESE ARE CONSTANT
# ============================================================================================================
# Define latency
# Lookup table of latency requirements in seconds, indexed by data type (rows)
# and by the textual latency class used in the traffic files (columns).
# NaN marks (data type, class) combinations that are not defined.
lat = np.array([[60,     np.nan, np.nan],
                [60,     np.nan, np.nan],
                [60,     np.nan, 3600],
                [60,     60,     np.nan],
                [60,     900,    21600],
                [60,     300,    3600],
                [60,     300,    np.nan],
                [60,     60,     np.nan],
                [60,     900,    21600],
                [60,     900,    21600],
                [60,     900,    21600],
                [60,     300,    np.nan]])
lat = pd.DataFrame(data=1.0*lat, columns=['seconds','minutes','hours'],
                   index=['voice','biomedical','caution and warning','command and teleoperation',
                          'file','health and status','nav type 1 products','nav type 2 message',
                          'pao hd video','sci hd video','science','sd video'])
# ============================================================================================================
# === FUNCTIONS TO CREATE TWO STATE MARKOV PROCESS AND BUNDLE GENERATION TIMES
# ============================================================================================================
def two_state_markov_process(Tmin, Tmax, DutyCycle, Ton):
    """Sample an ON/OFF two-state Markov process over [Tmin, Tmax].

    Parameters: Tmin/Tmax bound the process in time, DutyCycle is the
    fraction of time spent ON (1.0 means always ON), and Ton is the mean
    ON-period duration; the mean OFF duration is derived from the duty cycle.

    Returns (times, states): ``times`` are the transition instants (first
    entry Tmin, last entry Tmax) and ``states[i]`` is the state that begins
    at ``times[i]`` (True = ON).

    Uses np.random (exponential dwell times); not deterministic unless the
    global NumPy RNG is seeded.
    """
    # Initialize variables
    Tstart = 0
    Tend = Tmax - Tmin
    # Mean OFF duration implied by the duty cycle: DC = Ton / (Ton + Toff)
    Toff = ((1 / DutyCycle) - 1) * Ton
    K = 10  # oversampling factor; grown until the sample covers [0, Tend]
    ok = False
    while not ok:
        # Initialize variables
        Ns = int(np.ceil(0.5*K*(Tend-Tstart)/(Ton + Toff)))

        # Handle special case where duty cycle is 1
        if DutyCycle == 1:
            state, times = True, Tend
        else:
            # Random initial state, then alternating exponential dwell times
            state  = np.random.uniform() < DutyCycle
            on_dur  = np.random.exponential(scale=Ton,  size=Ns)
            off_dur = np.random.exponential(scale=Toff, size=Ns)
            times = np.zeros(2*Ns)
            if state == True:
                times[0::2] = on_dur
                times[1::2] = off_dur
            else:
                times[0::2] = off_dur
                times[1::2] = on_dur

        # Finalize the process generated
        times  = np.insert(np.cumsum(times), 0, 0)
        states = np.zeros_like(times, dtype=bool)
        states[0::2] = state
        states[1::2] = not state

        # Validate the sequence; if too short, oversample more and retry
        if times[-1] >= Tend: ok = True
        else: K += 1

    # Trim end of generated sequence to match Tend
    times[times > Tend] = Tend
    idx = np.argmax(times == Tend)+1
    # NOTE(review): idx = argmax(...)+1 is always >= 1, so the `idx != 0`
    # guard below can never be False — presumably dead; confirm intent.
    if idx != 0 and DutyCycle != 1.0 and idx != len(times):
        times  = times[0:idx]
        states = states[0:idx]

    # Shift times to Tmin, Tmax
    times += Tmin

    return times, states
def generate_markov_bundles(BS, Rb, Lat, Tmin, Tmax, DutyCycle, Ton):
    """Generate bundle release times for a flow driven by an ON/OFF Markov model.

    Parameters
    ----------
    BS : bundle size (bits)
    Rb : data rate while the flow is ON (bits/sec)
    Lat : latency requirement (sec)
    Tmin, Tmax : flow start and end times (sec)
    DutyCycle : fraction of time the flow is ON
    Ton : mean ON-period duration (sec)

    Returns
    -------
    (t, buf) : list of bundle delivery times, and leftover buffered data
               (always 0 because a final flush bundle is emitted).
    """
    # Generate Markov intervals
    times, states = two_state_markov_process(Tmin, Tmax, DutyCycle, Ton)

    # Initial processing entry. If initial state is OFF, skip it
    ini = (states[0] == False)

    # Initialize variables
    t = []
    buf = 0
    state = True

    # Iterate over periods
    for i in range(ini, len(states)-1):
        # Handle OFF state only if buffer is not empty
        if state == False and buf != 0:
            # t_ref indicates the time at which the last bundle was sent. If no
            # bundles were ever sent, assume 0.
            t_ref = 0 if len(t) == 0 else t[-1]

            # If waiting for the start of the ON period will make you exceed
            # the latency requirement, send a bundle with half data half padding.
            while t_ref + Lat < times[i+1] and buf >= BS:
                t_ref = max(t_ref, times[i]) + Lat
                # BUGFIX: was t.append(t), which appended the list itself
                # instead of the bundle release time.
                t.append(t_ref)
                buf -= BS

        # Handle ON state
        if state == True:
            dv = buf + Rb * (times[i+1] - times[i])
            N_bnd = int(np.floor(dv / BS))
            t_bnd = times[i] + np.arange(1,N_bnd+1)*(BS / Rb)
            if len(t_bnd) > 0: t_bnd -= buf/Rb
            t_bnd = t_bnd[t_bnd <= times[i+1]]
            t.extend(t_bnd)
            buf = dv - N_bnd * BS

        # Switch state
        state = not state

    # Add one last bundle add the end of t to transmit all unaccounted data.
    # Note that this bundle might have some padding data
    if buf > 0:
        t_ref = times[-1] if len(t) == 0 else t[-1]
        if states[-1] == False:
            t.append(t_ref + Lat)
        else:
            t.append(max(t_ref, times[-1])+Lat)
        buf = 0

    # return times at which a bundle is delivered, and the amount of data left at the end
    return t, buf
def generate_bundles(traffic, id2alias, min_bundle_size=1024, max_bundle_size=8e9, lat_frac=0.5):
    """Compute bundle release times and sizes for every flow in ``traffic``.

    Mutates each flow dict in place, adding 'Bundles' (release times),
    'BundleSize', 'fid', and node-id versions of 'Orig'/'Dest', and replacing
    the textual 'Latency' class with its numeric value from the module-level
    ``lat`` table. Returns the mutated ``traffic`` dict.

    NOTE(review): flows are assumed to carry 'StartTime'/'EndTime' datetimes
    and 'DataType'/'Latency'/'DataRate'/'DutyCycle'/'Duration' keys — confirm
    against the traffic-file loader.
    """
    # Get a map from node alias to ids
    alias2id = {v: k for k, v in id2alias.items()}

    # Get simulation start time (earliest flow start is the time origin)
    t0 = min([flow['StartTime'] for _, flow in traffic.items()])

    # Iterate over flows
    for fid, flow in traffic.items():
        # Resolve the textual latency class to seconds via the lookup table
        flow['Latency'] = lat.loc[flow['DataType'].lower(), flow['Latency'].lower()]

        # Compute bundle size: sized so one bundle fits within a fraction of
        # the latency budget, clamped to [min_bundle_size, max_bundle_size]
        bundle_lat = flow['Latency']*min(lat_frac, flow['DutyCycle'])
        bundle_sz  = min(max(min_bundle_size, int(flow['DataRate']*bundle_lat)), max_bundle_size)

        # Get start and time for this flow, in seconds relative to t0
        Tmin = (flow['StartTime'] - t0).total_seconds()
        Tmax = (flow['EndTime'] - t0).total_seconds()

        # Generate bundles
        t, _ = generate_markov_bundles(bundle_sz, flow['DataRate'], flow['Latency'],
                                       Tmin, Tmax, flow['DutyCycle'], flow['Duration'])

        # Store the bundle times and size
        flow['Bundles']    = t
        flow['BundleSize'] = bundle_sz
        flow['fid']        = fid

        # Transform names of flows from alias to ids
        flow['Orig'] = alias2id[flow['TransElementName']]
        flow['Dest'] = alias2id[flow['ReceiveElementName']]

    return traffic
# ============================================================================================================
# === SIMULATION CLASS
# ============================================================================================================
class DtnMarkovBundleGenerator(DtnAbstractGenerator):
    """Bundle generator driven by an ON/OFF Markov traffic model.

    Flow definitions are loaded once per simulation from a traffic file and
    shared across all generator instances via the class attribute
    ``_all_flows``.
    """
    # Class-level cache: {node id: {flow id: flow properties}}; populated
    # once by load_flows() and shared by every generator instance.
    _all_flows = None

    def __init__(self, env, parent, props):
        super().__init__(env, parent, props)
        # Initialize variables
        self.traffic_file = self.config['globals'].indir / props.file

    def reset(self):
        """Reset simulation state, clearing the shared flow cache."""
        # Reset static variables
        super().reset()
        self.__class__._all_flows = None

    def initialize(self):
        """Load flows (first instance only) and start one process per flow."""
        # Setting static variables only once
        if not self.__class__._all_flows: self.load_flows()
        # Get flows for this generator
        self.flows = self.__class__._all_flows[self.parent.nid]
        # Iterate over all flows for this generator
        for _, flow in self.flows.items(): self.env.process(self.run(flow))

    def load_flows(self):
        """Load the traffic file, generate per-flow bundles, and cache them
        in ``_all_flows`` keyed by origin node id."""
        # Load generators file (shifted to the simulation epoch)
        traffic = shift_traffic(load_traffic_file(self.traffic_file), self.epoch)
        # Generate bundles
        id2alias = {nid: dd.alias for nid, dd in self.config['network'].nodes.items()}
        flows = generate_bundles(traffic, id2alias, min_bundle_size=int(self.props.min_bundle_size),
                                 max_bundle_size=float(self.props.max_bundle_size),
                                 lat_frac=float(self.props.latency_fraction))
        # Log bundle generation
        for fid, flow in flows.items():
            if len(flow['Bundles']) == 0:
                self.disp('Flow {}: No bundles generated', fid)
            else:
                self.disp('Flow {}: {} bundles generated between t={:.3f} and t={:.3f}', fid, len(flow['Bundles']),
                          min(flow['Bundles']), max(flow['Bundles']))
        # Create a dictionary of dictionaries or dictionary: {Node ID: {flow id: {flow props}}
        d = defaultdict(dict)
        for fid, flow in flows.items(): d[flow['Orig']][fid] = flow
        # Store all the flows generated
        self.__class__._all_flows = d

    def run(self, flow):
        """SimPy process: emit one bundle at each precomputed release time."""
        # If no bundles, return
        if len(flow['Bundles']) == 0: return
        # Initialize variables: inter-bundle gaps from the absolute times
        bnd_dt = np.insert(np.diff(flow['Bundles']), 0, flow['Bundles'][0])
        # Iterate over bundle transmit times
        for dt in bnd_dt:
            # Wait until next time to transmit
            yield self.env.timeout(dt)
            # Create a new bundle and record it
            new_bundle = Bundle.from_flow(self.env, flow)
            # Monitor the new bundle creation
            self.monitor_new_bundle(new_bundle)
            # Log the new bundle creation
            self.disp('{} is created at node {}', new_bundle, self.parent.nid)
            # Schedule routers of bundle
            self.parent.forward(new_bundle)

    def predicted_data_vol(self):
        """ Predicted data volume in [bits] """
        return sum(f['DataRate']*((f['EndTime']-f['StartTime']).total_seconds())
                   for f in self.flows.values())
src/CLI/actioner/sonic_cli_ptp.py | project-arlo/sonic-mgmt-framework | 7 | 12766262 | <reponame>project-arlo/sonic-mgmt-framework<gh_stars>1-10
import sys
import base64
import struct
import socket
import cli_client as cc
from rpipe_utils import pipestr
from scripts.render_cli import show_cli_output
def node_addr_type(address):
    """Classify an address string.

    Returns "ipv4" or "ipv6" if the string parses as that address family,
    otherwise "mac".
    """
    families = ((socket.AF_INET, "ipv4"), (socket.AF_INET6, "ipv6"))
    for family, label in families:
        try:
            socket.inet_pton(family, address)
        except socket.error:
            continue
        return label
    return "mac"
def decode_base64(string):
    """Decode a base64-encoded 8-byte value into dotted hex form
    "xxxxxx.xxxx.xxxxxx" (octet groups of 3, 2 and 3)."""
    octets = struct.unpack("8B", base64.b64decode(string))
    pairs = ["%02x" % octet for octet in octets]
    return "%s.%s.%s" % ("".join(pairs[:3]), "".join(pairs[3:5]), "".join(pairs[5:]))
def get_unicast_table(aa, instance_num, port_num):
    """Fetch the PTP unicast table for one port of a PTP instance.

    :param aa: REST API client (cc.ApiClient)
    :param instance_num: PTP instance number for the RESTCONF keypath
    :param port_num: port number to look up (string or int)
    :return: (found, table) — found is 1 if the port exists in the response,
             and table is the unicast-table value or the string "None".
    """
    tmp_keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}',
                          instance_number=instance_num)
    tmp_response = aa.get(tmp_keypath)
    if tmp_response is None:
        return 0, "None"
    found = 0
    if tmp_response.ok():
        response = tmp_response.content
        if response is not None and response != {}:
            # Walk instance-list -> port-ds-list looking for the matching port.
            for i in response['ietf-ptp:instance-list']:
                if 'port-ds-list' in i:
                    for j in i['port-ds-list']:
                        if 'port-number' in j:
                            if j['port-number'] == int(port_num):
                                found = 1
                                if 'ietf-ptp-ext:unicast-table' in j:
                                    # Empty string is normalized to "None".
                                    if j['ietf-ptp-ext:unicast-table'] == '':
                                        return found, "None"
                                    else:
                                        return found, j['ietf-ptp-ext:unicast-table']
    # Port not found, or found without a unicast-table entry.
    return found, "None"
def get_port_num(interface):
    """Translate a SONiC interface name into its PTP port-number string.

    "EthernetN" maps to "N"; "VlanN" maps to str(N + 1000) so VLAN ports
    occupy a distinct numbering range from physical ports.

    Raises:
        ValueError: if *interface* is neither an Ethernet nor a Vlan name.
            (The original code raised an UnboundLocalError in that case
            because ``port_num`` was never assigned.)
    """
    if 'Ethernet' in interface:
        return interface.replace("Ethernet", "")
    if 'Vlan' in interface:
        return str(int(interface.replace("Vlan", "")) + 1000)
    raise ValueError("unsupported interface name: %s" % interface)
def invoke(func, args):
    """Build and execute the RESTCONF request for a single CLI action.

    *func* selects which keypath/payload to use; *args* supplies the
    positional values (instance number, interface name, value, ...).
    Returns the raw cli_client response object, or terminates the process
    for unknown actions.
    """
    rc = None
    body = None
    aa = cc.ApiClient()
    # --- default-ds scalar configuration ---
    if func == 'patch_ietf_ptp_ptp_instance_list_default_ds_domain_number':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/domain-number',
                          instance_number=args[0])
        body = {"ietf-ptp:domain-number": int(args[1])}
        rc = aa.patch(keypath, body)
    elif func == 'patch_ietf_ptp_ptp_instance_list_default_ds_two_step_flag':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/two-step-flag',
                          instance_number=args[0])
        if args[1] == "enable":
            body = {"ietf-ptp:two-step-flag": True}
        else:
            body = {"ietf-ptp:two-step-flag": False}
        rc = aa.patch(keypath, body)
    elif func == 'patch_ietf_ptp_ptp_instance_list_default_ds_priority1':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/priority1',
                          instance_number=args[0])
        body = {"ietf-ptp:priority1": int(args[1])}
        rc = aa.patch(keypath, body)
    elif func == 'patch_ietf_ptp_ptp_instance_list_default_ds_priority2':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/priority2',
                          instance_number=args[0])
        body = {"ietf-ptp:priority2": int(args[1])}
        rc = aa.patch(keypath, body)
    # --- interval/timeout settings (note: despite the "port_ds_list" names,
    # these patch default-ds extension leaves -- presumably instance-wide
    # defaults; confirm against the YANG model) ---
    elif func == 'patch_ietf_ptp_ptp_instance_list_port_ds_list_log_announce_interval':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:log-announce-interval',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:log-announce-interval": int(args[1])}
        rc = aa.patch(keypath, body)
    elif func == 'patch_ietf_ptp_ptp_instance_list_port_ds_list_announce_receipt_timeout':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:announce-receipt-timeout',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:announce-receipt-timeout": int(args[1])}
        rc = aa.patch(keypath, body)
    elif func == 'patch_ietf_ptp_ptp_instance_list_port_ds_list_log_sync_interval':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:log-sync-interval',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:log-sync-interval": int(args[1])}
        rc = aa.patch(keypath, body)
    elif func == 'patch_ietf_ptp_ptp_instance_list_port_ds_list_log_min_delay_req_interval':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:log-min-delay-req-interval',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:log-min-delay-req-interval": int(args[1])}
        rc = aa.patch(keypath, body)
    # --- clock/transport mode settings ---
    elif func == 'clock-type':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:clock-type',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:clock-type": args[1]}
        rc = aa.patch(keypath, body)
    elif func == 'network-transport':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:network-transport',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:network-transport": args[1]}
        rc = aa.patch(keypath, body)
    elif func == 'unicast-multicast':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:unicast-multicast',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:unicast-multicast": args[1]}
        rc = aa.patch(keypath, body)
    elif func == 'domain-profile':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:domain-profile',
                          instance_number=args[0])
        body = {"ietf-ptp-ext:domain-profile": args[1]}
        rc = aa.patch(keypath, body)
    elif func == 'udp6-scope':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds/ietf-ptp-ext:udp6-scope',
                          instance_number=args[0])
        # base 0 lets the user type the scope in decimal or hex (0x...)
        body = {"ietf-ptp-ext:udp6-scope": int(args[1], 0)}
        rc = aa.patch(keypath, body)
    # --- unicast master table management ---
    elif func == 'add_master_table':
        port_num = get_port_num(args[1])
        found, uc_tbl = get_unicast_table(aa, args[0], port_num)
        if not found:
            print("%Error: " + args[1] + " has not been added")
            sys.exit()
        nd_list = []
        if uc_tbl != 'None':
            nd_list = uc_tbl.split(',')
        if args[2] in nd_list:
            # entry already exists
            sys.exit()
        if len(nd_list) == 1 and nd_list[0] == '':
            nd_list = []
        # all nodes in one table must be the same address family
        if len(nd_list) > 0 and node_addr_type(nd_list[0]) != node_addr_type(args[2]):
            print("%Error: Mixed address types not allowed")
            sys.exit()
        if len(nd_list) >= 8:
            print("%Error: maximum 8 nodes")
            sys.exit()
        nd_list.append(args[2])
        value = ','.join(nd_list)
        args[2] = value
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/port-ds-list={port_number}/ietf-ptp-ext:unicast-table',
                          instance_number=args[0], port_number=port_num)
        body = {"ietf-ptp-ext:unicast-table": args[2]}
        rc = aa.patch(keypath, body)
    elif func == 'del_master_table':
        port_num = get_port_num(args[1])
        found, uc_tbl = get_unicast_table(aa, args[0], port_num)
        nd_list = []
        if uc_tbl != 'None':
            nd_list = uc_tbl.split(',')
        if args[2] not in nd_list:
            # entry doesn't exists
            sys.exit()
        nd_list.remove(args[2])
        value = ','.join(nd_list)
        args[2] = value
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/port-ds-list={port_number}/ietf-ptp-ext:unicast-table',
                          instance_number=args[0], port_number=port_num)
        body = {"ietf-ptp-ext:unicast-table": args[2]}
        rc = aa.patch(keypath, body)
    # --- per-port create/delete ---
    elif func == 'post_ietf_ptp_ptp_instance_list_port_ds_list_port_state':
        port_num = get_port_num(args[1])
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/port-ds-list={port_number}/underlying-interface',
                          instance_number=args[0], port_number=port_num)
        body = {"ietf-ptp:underlying-interface": args[1]}
        rc = aa.patch(keypath, body)
    elif func == 'delete_ietf_ptp_ptp_instance_list_port_ds_list':
        port_num = get_port_num(args[1])
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/port-ds-list={port_number}',
                          instance_number=args[0], port_number=port_num)
        rc = aa.delete(keypath)
    # --- show/GET actions ---
    elif func == 'get_ietf_ptp_ptp_instance_list_time_properties_ds':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/time-properties-ds',
                          instance_number=args[0])
        rc = aa.get(keypath)
    elif func == 'get_ietf_ptp_ptp_instance_list_parent_ds':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/parent-ds',
                          instance_number=args[0])
        rc = aa.get(keypath)
    elif func == 'get_ietf_ptp_ptp_instance_list_port_ds_list':
        port_num = get_port_num(args[1])
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/port-ds-list={port_number}',
                          instance_number=args[0], port_number=port_num)
        rc = aa.get(keypath)
    elif func == 'get_ietf_ptp_ptp_instance_list_default_ds':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/default-ds',
                          instance_number=args[0])
        rc = aa.get(keypath)
    elif func == 'get_ietf_ptp_ptp_instance_list':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}',
                          instance_number=args[0])
        rc = aa.get(keypath)
    elif func == 'get_ietf_ptp_ptp_instance_list_current_ds':
        keypath = cc.Path('/restconf/data/ietf-ptp:ptp/instance-list={instance_number}/current-ds',
                          instance_number=args[0])
        rc = aa.get(keypath)
    else:
        print("%Error: not implemented")
        exit(1)
    return rc
def run(func, args):
    """Execute CLI action *func* and render or report the result.

    Composite show/config commands recurse into two primitive actions.
    On success, GET results are rendered through the template named in
    *args*; on failure the RESTCONF error message is printed and the
    process exits with a non-zero status.
    """
    if func == "show_ptp_clock":
        # Composite: the clock view combines default-ds and current-ds.
        run("get_ietf_ptp_ptp_instance_list_default_ds", args)
        run("get_ietf_ptp_ptp_instance_list_current_ds", args[2:])
        return
    elif func == "network_transport_uc_mc":
        # Composite: transport mode and unicast/multicast are set together.
        run("network-transport", args)
        run("unicast-multicast", args[2:])
        return
    api_response = invoke(func, args)
    if api_response is None:
        return
    if api_response.ok():
        response = api_response.content
        if response is None:
            if func != 'network-transport':
                print("Success")
        else:
            # Render GET command output; clock identities arrive base64
            # encoded and are converted to dotted-hex form for display.
            if func == 'get_ietf_ptp_ptp_instance_list_default_ds':
                if not response == {}:
                    if 'clock-identity' in response['ietf-ptp:default-ds']:
                        response['ietf-ptp:default-ds']['clock-identity'] = decode_base64(response['ietf-ptp:default-ds']['clock-identity'])
                show_cli_output(args[1], response)
            elif func == 'get_ietf_ptp_ptp_instance_list_port_ds_list':
                show_cli_output(args[2], response)
            elif func == 'get_ietf_ptp_ptp_instance_list_parent_ds':
                if not response == {}:
                    if 'parent-port-identity' in response['ietf-ptp:parent-ds']:
                        response['ietf-ptp:parent-ds']['parent-port-identity']['clock-identity'] = decode_base64(response['ietf-ptp:parent-ds']['parent-port-identity']['clock-identity'])
                    if 'grandmaster-identity' in response['ietf-ptp:parent-ds']:
                        response['ietf-ptp:parent-ds']['grandmaster-identity'] = decode_base64(response['ietf-ptp:parent-ds']['grandmaster-identity'])
                show_cli_output(args[1], response)
            elif func == 'get_ietf_ptp_ptp_instance_list_time_properties_ds':
                show_cli_output(args[1], response)
            elif func == 'get_ietf_ptp_ptp_instance_list':
                show_cli_output(args[1], response)
            elif func == 'get_ietf_ptp_ptp_instance_list_current_ds':
                show_cli_output(args[1], response)
            else:
                return
    else:
        response = api_response.content
        if "ietf-restconf:errors" in response:
            err = response["ietf-restconf:errors"]
            if "error" in err:
                errList = err["error"]
                errDict = {}
                for err_list_dict in errList:
                    # .items() instead of the Python-2-only .iteritems()
                    # so this module also runs under Python 3.
                    for k, v in err_list_dict.items():
                        errDict[k] = v
                if "error-message" in errDict:
                    print("%Error: " + errDict["error-message"])
                    sys.exit(-1)
                print("%Error: Transaction Failure")
                sys.exit(-1)
        print(api_response.error_message())
        print("%Error: Transaction Failure")
        sys.exit(-1)
if __name__ == '__main__':
    # CLI entry point: argv[1] is the action name, the rest are its args.
    pipestr().write(sys.argv)
    # pdb.set_trace()
    run(sys.argv[1], sys.argv[2:])
| 2.046875 | 2 |
src/keras_examples/sine2.py | JouniVatanen/NLP-and-Deep-Learning | 1 | 12766263 | <gh_stars>1-10
# https://lazyprogrammer.me
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Input, SimpleRNN, Dense
from keras.optimizers import SGD, Adam
# make the original data: two sine waves of different frequency over 200 steps
series1 = np.sin(0.1*np.arange(200))
series2 = np.sin(0.2*np.arange(200))
# plot it
plt.plot(series1)
plt.plot(series2)
plt.show()
### build the dataset
# let's see if we can use T past values to predict the next value
T = 10
D = 2
X = []
Y = []
# each sample holds T consecutive values from both series; the target is
# the SUM of the two series at the following time step
for t in range(len(series1) - T - 1):
    x = [series1[t:t+T], series2[t:t+T]]
    # print("x[-1]:", x[-1])
    X.append(x)
    y = series1[t+T] + series2[t+T]
    # print("y:", y)
    Y.append(y)
X = np.array(X)
print("X.shape:", X.shape)
# reorder from (N, D, T) to (N, T, D) as expected by Keras RNN layers
X = np.transpose(X, (0, 2, 1))
Y = np.array(Y)
N = len(X)
### many-to-one RNN
# make the RNN: T timesteps of D features -> 5 hidden units -> 1 output
i = Input(shape=(T, D))
x = SimpleRNN(5)(i)
x = Dense(1)(x)
model = Model(i, x)
model.compile(
  loss='mse',
  optimizer=Adam(lr=0.1),
)
# train the RNN on the first half; validate on the second half (a simple
# chronological train/test split, no shuffling)
r = model.fit(
  X[:-N//2], Y[:-N//2],
  batch_size=32,
  epochs=80,
  validation_data=(X[-N//2:], Y[-N//2:]),
)
# plot the training and validation loss curves
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
# plot predictions vs targets over the whole dataset (train + validation)
outputs = model.predict(X)
print(outputs.shape)
predictions = outputs[:,0]
plt.plot(Y, label='targets')
plt.plot(predictions, label='predictions')
plt.title("many-to-one RNN")
plt.legend()
plt.show()
| 2.8125 | 3 |
tests/test_client.py | jakm/fastjsonrpc | 7 | 12766264 | import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from twisted.trial.unittest import TestCase, SkipTest
from twisted.internet.defer import Deferred
from twisted.web.server import Site
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.internet.error import TimeoutError
from twisted.web.client import HTTPConnectionPool
from twisted.web.client import ContentDecoderAgent, GzipDecoder
from twisted.internet import ssl
from twisted.cred.portal import Portal
from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
from twisted.cred.credentials import Anonymous, UsernamePassword
from twisted.web.guard import HTTPAuthSessionWrapper, BasicCredentialFactory
from fastjsonrpc.client import ReceiverProtocol
from fastjsonrpc.client import StringProducer
from fastjsonrpc.client import ProxyFactory
from fastjsonrpc.client import Proxy
from fastjsonrpc import jsonrpc
from dummyserver import DummyServer, AuthDummyServer
class TestReceiverProtocol(TestCase):
    """Unit tests for ReceiverProtocol: body accumulation and the
    'finished' Deferred fired on connection loss."""
    def setUp(self):
        self.rp = ReceiverProtocol(Deferred())
    def test_init(self):
        self.assertTrue(isinstance(self.rp.finished, Deferred))
    def test_dataReceivedOnce(self):
        data = 'some random string'
        self.rp.dataReceived(data)
        self.assertEquals(self.rp.body, data)
    def test_dataReceivedTwice(self):
        # successive chunks must be concatenated in arrival order
        data1 = 'string1'
        data2 = 'string2'
        self.rp.dataReceived(data1)
        self.rp.dataReceived(data2)
        self.assertEquals(self.rp.body, data1 + data2)
    def test_connectionLostCalled(self):
        data = 'some random string'
        self.rp.dataReceived(data)
        self.rp.connectionLost(None)
        self.assertTrue(self.rp.finished.called)
    def test_connectionLostCalledData(self):
        # the finished Deferred must fire with the accumulated body
        data = 'some random string'
        self.rp.dataReceived(data)
        def called(data_received):
            self.assertEquals(data_received, data)
        self.rp.finished.addCallback(called)
        self.rp.connectionLost(None)
        return self.rp.finished
class DummyConsumer(object):
    """Minimal consumer stand-in that records everything written to it."""

    def __init__(self):
        self.body = ''

    def write(self, data):
        """Append *data* to the accumulated body."""
        self.body = self.body + data
class TestStringProducer(TestCase):
    """Unit tests for StringProducer: stored body/length and that
    startProducing writes the full body to the consumer."""
    def test_init(self):
        data = 'some random string'
        sp = StringProducer(data)
        self.assertEquals(sp.body, data)
        self.assertEquals(sp.length, len(data))
    def test_startProducing(self):
        data = 'some random string'
        sp = StringProducer(data)
        consumer = DummyConsumer()
        d = sp.startProducing(consumer)
        def finished(_):
            self.assertEquals(consumer.body, data)
        d.addCallback(finished)
        return d
class DummyResponse(object):
    """Fake of a twisted web Response that delivers a canned body."""

    def __init__(self, body):
        self.body = body

    def deliverBody(self, protocol):
        """Hand the entire canned body to *protocol*, then close it."""
        self.protocol = protocol
        protocol.dataReceived(self.body)
        protocol.connectionLost(None)
class TestProxy(TestCase):
    """
    Integration tests for Proxy against a live DummyServer listening on an
    ephemeral TCP port (set up/torn down around every test).

    @TODO: Test callRemote using fake agent, using predefined 'output' JSON,
        like in server tests. This might require a bit of refactoring in
        client itself.
    """
    def setUp(self):
        # real reactor listener on port 0 -> OS picks a free port
        site = Site(DummyServer())
        self.port = reactor.listenTCP(0, site)
        self.portNumber = self.port._realPortNumber
    def tearDown(self):
        self.port.stopListening()
    def test_init(self):
        url = 'http://example.org/abcdef'
        version = '2.0'
        proxy = Proxy(url, version)
        self.assertEquals(proxy.url, url)
        self.assertEquals(proxy.version, version)
        self.assertTrue(isinstance(proxy.credentials, Anonymous))
        self.assertTrue(proxy.agent._connectTimeout is None)
    def test_init_agent(self):
        proxy = Proxy('', '')
        self.assertTrue(isinstance(proxy.agent, Agent))
    def test_bodyFromResponseProtocolBody(self):
        data = 'some random string'
        proxy = Proxy('', '')
        response = DummyResponse(data)
        d = proxy.bodyFromResponse(response)
        def finished(_):
            self.assertEquals(response.protocol.body, data)
        d.addCallback(finished)
        return d
    def test_bodyFromResponseDeferred(self):
        data = 'some random string'
        proxy = Proxy('', '')
        response = DummyResponse(data)
        d = proxy.bodyFromResponse(response)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    # round-trip calls against the live dummy server, both protocol versions
    def test_callRemoteV1Ok(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1)
        d = proxy.callRemote('echo', data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    def test_callRemoteV2Ok(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_2)
        d = proxy.callRemote('echo', data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    def test_callRemoteV1NoMethod(self):
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1)
        d = proxy.callRemote('nosuchmethod')
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            self.assertEquals(result.strerror, 'Method nosuchmethod not found')
            self.assertEquals(result.errno, jsonrpc.METHOD_NOT_FOUND)
            self.assertEquals(result.version, jsonrpc.VERSION_1)
        e.addCallback(finished)
        return e
    def test_callRemoteV2InvalidParams(self):
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_2)
        d = proxy.callRemote('echo', 'abc', 'def')
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            msg = 'jsonrpc_echo() takes exactly 2 arguments (3 given)'
            self.assertEquals(result.strerror, msg)
            self.assertEquals(result.errno, jsonrpc.INVALID_PARAMS)
            self.assertEquals(result.version, unicode(jsonrpc.VERSION_2))
        e.addCallback(finished)
        return e
    def test_keywordsV1(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1)
        d = proxy.callRemote('echo', data=data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    def test_keywordsV2(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_2)
        d = proxy.callRemote('echo', data=data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    def test_keywordsUnexpected(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1)
        d = proxy.callRemote('echo', wrongname=data)
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            msg = 'jsonrpc_echo() got an unexpected keyword argument ' + \
                  '\'wrongname\''
            self.assertEquals(result.strerror, msg)
            self.assertEquals(result.errno, jsonrpc.INVALID_PARAMS)
        e.addCallback(finished)
        return d
    def test_timeout(self):
        """ Google doesn't offer any services on our crazy ports """
        addr = 'http://google.com:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1, connectTimeout=0.1)
        d = proxy.callRemote('sleep', 5)
        def finished(result):
            self.assertTrue(isinstance(result.value, TimeoutError))
        d.addErrback(finished)
        return d
    def test_anonymousLogin(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1, credentials=Anonymous())
        d = proxy.callRemote('echo', data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    def test_loginNotNeccessary(self):
        # credentials are harmless when the server requires no auth
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        credentials = UsernamePassword('user', 'password')
        proxy = Proxy(addr, credentials=credentials)
        d = proxy.callRemote('echo', data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    def test_poolPassing(self):
        # an externally supplied HTTPConnectionPool must be used as-is
        pool = HTTPConnectionPool(reactor)
        proxy = Proxy('', pool=pool)
        self.assertEqual(id(proxy.agent._pool), id(pool))
class TestProxyFactory(TestCase):
    """Unit tests for ProxyFactory: defaults, per-proxy configuration
    propagation, shared vs. private connection pools, and HTTP
    compression support."""
    def test_init(self):
        factory = ProxyFactory()
        proxy = factory.getProxy('')
        self.assertEqual(proxy.version, jsonrpc.VERSION_1)
        self.assertTrue(isinstance(proxy.credentials, Anonymous))
        self.assertTrue(proxy.agent._connectTimeout is None)
    def test_getProxy(self):
        # every factory setting must be propagated to each created proxy;
        # distinct proxies get distinct pools by default
        url1 = 'http://fakeurl1'
        url2 = 'http://fakeurl2'
        version = jsonrpc.VERSION_2
        connectTimeout = 30
        cred = UsernamePassword('username', 'password')
        contextFactory = WebClientContextFactory()
        factory = ProxyFactory(version=version, connectTimeout=connectTimeout,
                               credentials=cred, contextFactory=contextFactory)
        proxy1 = factory.getProxy(url1)
        proxy2 = factory.getProxy(url2)
        self.assertNotEqual(id(proxy1), id(proxy2))
        self.assertNotEqual(id(proxy1.agent._pool), id(proxy2.agent._pool))
        self.assertEqual(proxy1.url, url1)
        self.assertEqual(proxy2.url, url2)
        self.assertEqual(proxy1.version, version)
        self.assertEqual(proxy2.version, version)
        self.assertEqual(proxy1.credentials, cred)
        self.assertEqual(proxy2.credentials, cred)
        self.assertEqual(proxy1.agent._connectTimeout, connectTimeout)
        self.assertEqual(proxy2.agent._connectTimeout, connectTimeout)
    def test_sharedPool(self):
        # sharedPool=True: all proxies reuse the factory's single pool
        factory = ProxyFactory(sharedPool=True)
        proxy1 = factory.getProxy('')
        proxy2 = factory.getProxy('')
        proxy3 = factory.getProxy('')
        self.assertNotEqual(id(proxy1), id(proxy2))
        self.assertNotEqual(id(proxy2), id(proxy3))
        self.assertNotEqual(id(proxy1), id(proxy3))
        self.assertEqual(id(proxy1.agent._pool), id(factory._pool))
        self.assertEqual(id(proxy2.agent._pool), id(factory._pool))
        self.assertEqual(id(proxy3.agent._pool), id(factory._pool))
    #
    # I trust twisted's well tested Agent and HTTPConnectionPool classes
    #
    def test_init_persistentConnections(self):
        persistent = True
        maxConn = 5
        timeout = 3600
        retry = False
        factory = ProxyFactory(persistent=persistent,
                               maxPersistentPerHost=maxConn,
                               cachedConnectionTimeout=timeout,
                               retryAutomatically=retry)
        proxy = factory.getProxy('')
        self.assertEqual(proxy.agent._pool.persistent, persistent)
        self.assertEqual(proxy.agent._pool.maxPersistentPerHost, maxConn)
        self.assertEqual(proxy.agent._pool.cachedConnectionTimeout, timeout)
        self.assertEqual(proxy.agent._pool.retryAutomatically, retry)
    def test_init_sharedPersistentConnections(self):
        persistent = True
        maxConn = 5
        timeout = 3600
        retry = False
        factory = ProxyFactory(sharedPool=True,
                               persistent=persistent,
                               maxPersistentPerHost=maxConn,
                               cachedConnectionTimeout=timeout,
                               retryAutomatically=retry)
        proxy1 = factory.getProxy('')
        proxy2 = factory.getProxy('')
        self.assertEqual(id(proxy1.agent._pool), id(proxy2.agent._pool))
        self.assertEqual(proxy1.agent._pool.persistent, persistent)
        self.assertEqual(proxy1.agent._pool.maxPersistentPerHost, maxConn)
        self.assertEqual(proxy1.agent._pool.cachedConnectionTimeout, timeout)
        self.assertEqual(proxy1.agent._pool.retryAutomatically, retry)
        self.assertEqual(proxy2.agent._pool.persistent, persistent)
        self.assertEqual(proxy2.agent._pool.maxPersistentPerHost, maxConn)
        self.assertEqual(proxy2.agent._pool.cachedConnectionTimeout, timeout)
        self.assertEqual(proxy2.agent._pool.retryAutomatically, retry)
    def test_init_HTTPCompression(self):
        # compressedHTTP=True wraps the Agent in a gzip-decoding agent
        factory = ProxyFactory(compressedHTTP=True)
        proxy = factory.getProxy('')
        self.assertTrue(isinstance(proxy.agent, ContentDecoderAgent))
        self.assertTrue(isinstance(proxy.agent._agent, Agent))
        self.assertTrue('gzip' in proxy.agent._decoders)
        self.assertEqual(proxy.agent._decoders['gzip'], GzipDecoder)
class WebClientContextFactory(ssl.ClientContextFactory):
    """Client-side SSL context factory adapting ClientContextFactory to the
    (hostname, port) signature Agent expects; hostname/port are ignored."""
    def getContext(self, hostname, port):
        return ssl.ClientContextFactory.getContext(self)
class TestSSLProxy(TestCase):
    """
    Integration test over HTTPS; skipped unless a local key/cert pair is
    present in ../ssl-keys/.

    @TODO: All this does is checking whether Agent connects to SSL server...
    """
    def setUp(self):
        # require the developer-provided self-signed key pair
        if not (os.path.exists('../ssl-keys/server.key') and
                os.path.exists('../ssl-keys/server.crt')):
            raise SkipTest('For testing SSL, please put server.key and ' + \
                           'server.crt to ssl-keys/')
        SSLFactory = ssl.DefaultOpenSSLContextFactory('../ssl-keys/server.key',
                                                      '../ssl-keys/server.crt')
        site = Site(DummyServer())
        self.port = reactor.listenSSL(0, site, contextFactory=SSLFactory)
        self.portNumber = self.port._realPortNumber
    def tearDown(self):
        self.port.stopListening()
    def test_init(self):
        url = 'https://example.org/abcdef'
        version = '2.0'
        proxy = Proxy(url, version, contextFactory=WebClientContextFactory())
        self.assertEquals(proxy.url, url)
        self.assertEquals(proxy.version, version)
    def test_init_agent(self):
        proxy = Proxy('', '', contextFactory=WebClientContextFactory())
        self.assertTrue(isinstance(proxy.agent, Agent))
    def test_callRemote(self):
        """
        The test itself passes, but trial raises "Reactor was unclean" after
        tearDown.. Might be related to
        http://twistedmatrix.com/trac/ticket/5118
        """
        data = 'some random string'
        addr = 'https://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1,
                      contextFactory=WebClientContextFactory())
        d = proxy.callRemote('echo', data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
class TestHTTPAuth(TestCase):
    """
    HTTP Basic auth tests against a guarded DummyServer (single valid
    user/password in an in-memory checker).

    @TODO: All this does is basically checking whether auth in Agent works...
    """
    def setUp(self):
        checker = InMemoryUsernamePasswordDatabaseDontUse(user='password')
        portal = Portal(AuthDummyServer(), [checker])
        credentialFactory = BasicCredentialFactory('localhost')
        resource = HTTPAuthSessionWrapper(portal, [credentialFactory])
        site = Site(resource)
        self.port = reactor.listenTCP(0, site)
        self.portNumber = self.port._realPortNumber
    def tearDown(self):
        self.port.stopListening()
    def test_loginOk(self):
        data = 'some random string'
        addr = 'http://localhost:%s' % self.portNumber
        credentials = UsernamePassword('user', 'password')
        proxy = Proxy(addr, credentials=credentials)
        d = proxy.callRemote('echo', data)
        def finished(result):
            self.assertEquals(result, data)
        d.addCallback(finished)
        return d
    # every failure mode below must surface as an INVALID_REQUEST
    # JSONRPCError with an 'Unauthorized' message
    def test_loginWrongPassword(self):
        addr = 'http://localhost:%s' % self.portNumber
        credentials = UsernamePassword('<PASSWORD>', '<PASSWORD>')
        proxy = Proxy(addr, credentials=credentials)
        d = proxy.callRemote('echo', '')
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            self.assertEquals(result.strerror, 'Unauthorized')
            self.assertEquals(result.errno, jsonrpc.INVALID_REQUEST)
        e.addCallback(finished)
        return d
    def test_loginWrongUser(self):
        addr = 'http://localhost:%s' % self.portNumber
        credentials = UsernamePassword('<PASSWORD>', '<PASSWORD>')
        proxy = Proxy(addr, credentials=credentials)
        d = proxy.callRemote('echo', '')
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            self.assertEquals(result.strerror, 'Unauthorized')
            self.assertEquals(result.errno, jsonrpc.INVALID_REQUEST)
        e.addCallback(finished)
        return d
    def test_noCredentials(self):
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, jsonrpc.VERSION_1)
        d = proxy.callRemote('echo', '')
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            self.assertEquals(result.strerror, 'Unauthorized')
            self.assertEquals(result.errno, jsonrpc.INVALID_REQUEST)
        e.addCallback(finished)
        return d
    def test_anonymousError(self):
        addr = 'http://localhost:%s' % self.portNumber
        proxy = Proxy(addr, credentials=Anonymous())
        d = proxy.callRemote('echo', '')
        e = self.assertFailure(d, jsonrpc.JSONRPCError)
        def finished(result):
            self.assertEquals(result.strerror, 'Unauthorized')
            self.assertEquals(result.errno, jsonrpc.INVALID_REQUEST)
        e.addCallback(finished)
        return d
| 2.09375 | 2 |
camos/plugins/openbxr/openbxr.py | danilexn/camos | 1 | 12766265 | <reponame>danilexn/camos
# -*- coding: utf-8 -*-
# Created on Sat Jun 05 2021
# Last modified on Mon Jun 07 2021
# Copyright (c) CaMOS Development Team. All Rights Reserved.
# Distributed under a MIT License. See LICENSE for more info.
from PyQt5.QtCore import pyqtSignal
import numpy as np
import h5py
from camos.tasks.opening import Opening
from camos.plotter.raster import Raster
from camos.viewport.signalviewer2 import SignalViewer2
class OpenBXR(Opening):
    """Plugin that loads BrainWave BXR (CMOS-MEA) recordings into CaMOS.

    Reads the spike channel IDs and spike times stored in the HDF5-based
    .bxr file, converts the times to seconds using the recording's sampling
    rate, and registers the events as a raster signal on the parent model.
    """

    plotReady = pyqtSignal()
    gridReady = pyqtSignal(np.ndarray)
    analysis_name = "Open bxr file"

    def __init__(self, *args, **kwargs):
        """Initialize the opener; only .bxr files are offered in the dialog."""
        super(OpenBXR, self).__init__(
            extensions="BXR File (*.bxr)", show=False, *args, **kwargs
        )

    def _run(self):
        """Parse the selected .bxr file and publish its spike events."""
        # Open read-only and close deterministically; the previous version
        # leaked the HDF5 handle by never closing it.
        with h5py.File(self.filename, "r") as filehdf5_bxr:
            samplingRate = np.asarray(
                filehdf5_bxr["3BRecInfo"]["3BRecVars"]["SamplingRate"]
            )[0]
            # Last frame of the first recorded time interval.
            lastFrame = int(
                np.asarray(filehdf5_bxr["3BUserInfo"]["TimeIntervals"])[0][3][0][1]
            )
            duration = lastFrame / samplingRate
            SpikeChIDs = np.asarray(
                filehdf5_bxr["3BResults"]["3BChEvents"]["SpikeChIDs"]
            )
            # Spike times are stored in frames; convert to seconds.
            SpikeTimes = (
                np.asarray(filehdf5_bxr["3BResults"]["3BChEvents"]["SpikeTimes"])
                / samplingRate
            )
            NCols = int(np.array(filehdf5_bxr["3BRecInfo"]["3BMeaChip"]["NCols"]))
        # Structured array with one (channel id, event time) row per spike.
        output_type = [("CellID", "int"), ("Active", "float")]
        self.output = np.zeros(shape=(len(SpikeChIDs), 1), dtype=output_type)
        self.output[:]["CellID"] = SpikeChIDs.reshape(-1, 1)
        self.output[:]["Active"] = SpikeTimes.reshape(-1, 1)
        _sv = SignalViewer2(
            self.parent, self.output, title="Events from BXR", plotter=Raster
        )
        self.parent.signalmodel.add_data(
            self.output,
            "Events from BXR".format(),
            _sv,
            samplingRate,
            properties={
                "samplingRate": samplingRate,
                "duration": duration,
                "timeUnits": "s",
                "electrodeX": NCols,
            },
        )
        _sv.display()

    def h5printR(self, item, leading=""):
        """Recursively print the structure of an HDF5 group or file.

        Datasets are shown with their shape; groups are descended into with
        increased indentation.
        """
        for key in item:
            if isinstance(item[key], h5py.Dataset):
                print(leading + key + " : " + str(item[key].shape))
            else:
                print(leading + key)
                self.h5printR(item[key], leading + "  ")

    def h5print(self, filename):
        """Open *filename* and print its full HDF5 tree (debug helper)."""
        with h5py.File(filename, "r") as h:
            print(filename)
            self.h5printR(h, "  ")
| 2.21875 | 2 |
mergeforms/migrations/0001_initial.py | mbronstein/ssa412 | 0 | 12766266 | <reponame>mbronstein/ssa412
# Generated by Django 3.2.6 on 2021-09-25 23:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
import uuid
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); creates the MergeForm
    # table storing merge/letter templates and their rendered bodies.
    # Do not hand-edit field definitions after this migration has been
    # applied -- create a follow-up migration instead.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('taggit', '0003_taggeditem_add_unique_index'),
    ]
    operations = [
        migrations.CreateModel(
            name='MergeForm',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sysid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('name', models.CharField(max_length=90, unique=True)),
                ('uuid', models.CharField(max_length=50, unique=True)),
                ('mimetype', models.CharField(choices=[('pdf', 'pdf'), ('docx', 'docx'), ('txt', 'txt'), ('html', 'html'), ('xlsx', 'xlsx')], max_length=20)),
                ('category', models.CharField(choices=[('ss_form', 'ss form'), ('sso_letter', 'sso letter'), ('gen_letter', 'gen letter'), ('ins_letter', 'ins letter'), ('med_rec_req', 'med rec req'), ('lomb_form', 'lomb_form')], max_length=20, null=True)),
                ('template_type', models.CharField(choices=[('pdf', 'pdf'), ('reportlab', 'reportlab'), ('docx', 'docx')], max_length=20)),
                ('filename', models.CharField(blank=True, max_length=120, null=True)),
                ('repo', models.CharField(blank=True, default='mergeforms', max_length=10)),
                ('description', models.CharField(blank=True, max_length=90, null=True)),
                ('comments', models.TextField(blank=True, null=True)),
                ('text_body', models.TextField(blank=True, null=True)),
                ('blob_body', models.BinaryField(null=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('last_modified_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')),
            ],
        ),
    ]
| 1.75 | 2 |
runner/ppo.py | 2855239858/CenLight-Traffic-Grid-Signal-Optimization-viaAction-and-State-Decomposition | 1 | 12766267 | """Multi-agent traffic light example (single shared policy)."""
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from flow.envs.multiagent import MyMultiTrafficLightGridPOEnv
from flow.networks import TrafficLightGridNetwork
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams
from flow.core.params import InFlows, SumoCarFollowingParams, VehicleParams
from flow.controllers import SimCarFollowingController, GridRouter
from ray.tune.registry import register_env
from flow.utils.registry import make_create_env
import numpy as np
# Experiment parameters
N_ROLLOUTS = 20 # number of rollouts per training iteration
N_CPUS = 3 # number of parallel workers

# Environment parameters
HORIZON = 400 # time horizon of a single rollout
V_ENTER = 30 # enter speed for departing vehicles
INNER_LENGTH = 300 # length of inner edges in the traffic light grid network
LONG_LENGTH = 100 # length of final edge in route
SHORT_LENGTH = 300 # length of edges that vehicles start on
# number of vehicles originating in the left, right, top, and bottom edges
N_LEFT, N_RIGHT, N_TOP, N_BOTTOM = 0, 0, 0, 0
EDGE_INFLOW = 300 # inflow rate of vehicles at every edge
N_ROWS = 2 # number of row of bidirectional lanes
N_COLUMNS = 2 # number of columns of bidirectional lanes

# we place a sufficient number of vehicles to ensure they confirm with the
# total number specified above. We also use a "right_of_way" speed mode to
# support traffic light compliance
vehicles = VehicleParams()
# With all four N_* counts at 0 this evaluates to 0 — vehicles enter only
# through the inflows configured below.
num_vehicles = (N_LEFT + N_RIGHT) * N_COLUMNS + (N_BOTTOM + N_TOP) * N_ROWS
vehicles.add(
    veh_id="human",
    acceleration_controller=(SimCarFollowingController, {}),
    car_following_params=SumoCarFollowingParams(
        min_gap=2.5,
        max_speed=V_ENTER,
        decel=7.5, # avoid collisions at emergency stops
        speed_mode="right_of_way",
    ),
    routing_controller=(GridRouter, {}),
    num_vehicles=num_vehicles)

# inflows of vehicles are place on all outer edges (listed here)
outer_edges = []
outer_edges += ["left{}_{}".format(N_ROWS, i) for i in range(N_COLUMNS)]
outer_edges += ["right0_{}".format(i) for i in range(N_COLUMNS)]
outer_edges += ["bot{}_0".format(i) for i in range(N_ROWS)]
outer_edges += ["top{}_{}".format(i, N_COLUMNS) for i in range(N_ROWS)]

# equal inflows for each edge (as dictate by the EDGE_INFLOW constant)
inflow = InFlows()
for edge in outer_edges:
    inflow.add(
        veh_type="human",
        edge=edge,
        # vehs_per_hour=EDGE_INFLOW,
        # probability=0.10,
        # NOTE(review): the inflow is hard-coded to 600 veh/h here, so the
        # EDGE_INFLOW constant above (and the "i{}" suffix in exp_tag below)
        # no longer reflects the actual inflow rate — confirm intent.
        vehs_per_hour = 600,
        departLane="free",
        departSpeed=V_ENTER)

# Network geometry: a N_ROWS x N_COLUMNS signalized grid.
myNetParams = NetParams(
    inflows=inflow,
    additional_params={
        "speed_limit": V_ENTER + 5, # inherited from grid0 benchmark
        "grid_array": {
            "short_length": SHORT_LENGTH,
            "inner_length": INNER_LENGTH,
            "long_length": LONG_LENGTH,
            "row_num": N_ROWS,
            "col_num": N_COLUMNS,
            "cars_left": N_LEFT,
            "cars_right": N_RIGHT,
            "cars_top": N_TOP,
            "cars_bot": N_BOTTOM,
        },
        "horizontal_lanes": 1,
        "vertical_lanes": 1,
    },
)

# Canonical Flow experiment description consumed by the training code below.
flow_params = dict(
    # name of the experiment
    exp_tag="grid_0_{}x{}_i{}_multiagent".format(N_ROWS, N_COLUMNS, EDGE_INFLOW),
    # name of the flow environment the experiment is running on
    env_name=MyMultiTrafficLightGridPOEnv,
    # name of the network class the experiment is running on
    network=TrafficLightGridNetwork,
    # simulator that is used by the experiment
    simulator='traci',
    # sumo-related parameters (see flow.core.params.SumoParams)
    sim=SumoParams(
        restart_instance=True,
        sim_step=1,
        render=False,
    ),
    # environment related parameters (see flow.core.params.EnvParams)
    env=EnvParams(
        horizon=HORIZON,
        warmup_steps=0,
        sims_per_step=1,
        additional_params={
            "target_velocity": 50,
            "switch_time": 3,
            "num_observed": 2,
            "discrete": False,
            "tl_type": "actuated",
            "num_local_edges": 4,
            "num_local_lights": 4,
        },
    ),
    # network-related parameters (see flow.core.params.NetParams and the
    # network's documentation or ADDITIONAL_NET_PARAMS component)
    net=myNetParams,
    # vehicles to be placed in the network at the start of a rollout (see
    # flow.core.params.VehicleParams)
    veh=vehicles,
    # parameters specifying the positioning of vehicles upon initialization
    # or reset (see flow.core.params.InitialConfig)
    initial=InitialConfig(
        spacing='custom',
        shuffle=True,
    ),
)
############################# Training section below #################################
def cover_actions(c_a, s_a, num):
    """Mark the chosen action index in a per-agent action vector.

    Sets ``s_a[c_a] = 1`` when ``c_a`` falls inside the first ``num`` slots;
    a ``c_a`` outside that range (the "no-op" action) leaves ``s_a`` untouched.
    Mutates and returns ``s_a``.
    """
    for slot in range(num):
        if slot != c_a:
            continue
        s_a[slot] = 1
    return s_a
def data_collection(env, vels, queues):
    """Append the current mean vehicle speed and queued-vehicle count.

    A vehicle is considered queued when its speed is below 1 (same threshold
    as the original implementation). Mutates and returns (vels, queues).
    """
    kernel_vehicle = env.k.vehicle
    speeds = kernel_vehicle.get_speed(kernel_vehicle.get_ids())
    vels.append(np.mean(speeds))
    queues.append(sum(1 for speed in speeds if speed < 1))
    return vels, queues
def normalize_formation(state, Agent_NUM):
    """Order the per-agent observation dict into a list indexed by agent id.

    ``state`` maps keys "center0".."center{Agent_NUM-1}" to observations;
    the returned list holds them in ascending agent order.
    """
    return [state["center" + str(agent_id)] for agent_id in range(Agent_NUM)]
def record_line(log_path, line):
    """Append *line* followed by a newline to the file at *log_path*.

    Always returns True.
    """
    with open(log_path, 'a') as log_file:
        log_file.writelines(line)
        log_file.writelines("\n")
    return True
if __name__ == "__main__":
    # Build the grid network and the multi-agent traffic-light environment
    # directly (bypassing RLlib's registry) so a custom PPO loop can drive it.
    myTrafficNet = TrafficLightGridNetwork(
        name = 'grid',
        vehicles = vehicles,
        net_params = myNetParams,
    )
    env = MyMultiTrafficLightGridPOEnv(
        env_params=flow_params['env'], sim_params=flow_params['sim'], network=myTrafficNet)
    # print(env.scenario.get_edge_list())

    # Perpare agent.
    # NOTE(review): star import — BATCH, EP_LEN and PPO used below come from
    # flow.core.ppo_agent; confirm their values there.
    from flow.core.ppo_agent import *

    ############################################################################
    ############################################################################
    Agent_NUM = N_ROWS * N_COLUMNS
    Reward_num = 1 # 0 means one reward per agent, 1 means a single global reward
    NAME = '2x2_600_PPO_SOFT_try4'
    Epoch = 4000
    steps = 400
    rnn_train_epi = 25
    # One centralized PPO agent: 42 observation features per intersection,
    # Agent_NUM+1 actions (one per light plus a "no switch" action).
    rnn_agent = PPO(s_dim=42*Agent_NUM,a_dim=Agent_NUM+1,name=NAME)
    ############################################################################
    ############################################################################
    global_counter = 0
    each_line_path = "collected_data/ppo/{}_plot_log.txt".format(NAME)
    test_epoch_path = "collected_data/ppo/{}_epoch_log.txt".format(NAME)

    for ep in range(Epoch):
        # PPO training rollouts
        for i in range(rnn_train_epi):
            print("当前训练次数:")
            print(i)
            global_counter += 1
            state = env.reset()
            state = normalize_formation(state,Agent_NUM)
            # Flatten the per-agent observation lists into one feature vector.
            _state = [n for a in state for n in a ]
            ep_r = 0.0
            for step in range(steps):
                step_r = 0.0
                # print(_state)
                _state = np.array(_state)
                _actions = rnn_agent.choose_action(_state)
                # print(_actions)
                actions = np.zeros((Agent_NUM,), dtype=int)
                rl_actions = cover_actions(_actions, actions,Agent_NUM)
                next_state, rewards, done, _ = env.step(rl_actions)
                if Reward_num == 0:
                    # Per-agent rewards: average them into a single scalar.
                    for k in range(Agent_NUM):
                        step_r += rewards[k]/Agent_NUM
                        ep_r += rewards[k]/Agent_NUM
                    rnn_agent.experience_store(_state, _actions, step_r)
                else:
                    ep_r += rewards
                    rnn_agent.experience_store(_state, _actions, rewards)
                state = next_state
                state = normalize_formation(state,Agent_NUM)
                _state = [n for a in state for n in a ]
                _state = np.array(_state)
                # BATCH / EP_LEN come from the ppo_agent star import above.
                if (step + 1) % BATCH == 0 or step == EP_LEN - 1:
                    rnn_agent.trajction_process(_state)
                    rnn_agent.update()
                    rnn_agent.empty_buffer()
                # Episode terminates only when every intersection reports done.
                # NOTE(review): this inner loop variable `i` shadows the
                # rollout counter `i` of the enclosing loop.
                _done = True
                for i in range(Agent_NUM):
                    _done *= done["center"+str(i)]
                # print('dome?')
                # print(_done)
                if _done:
                    break
            print('steps rewards:')
            print(ep_r)
            rnn_agent.summarize(ep_r, global_counter, 'reward')
        if ep % 10 == 0:
            rnn_agent.save_params(NAME,ep)

        # test phase
        if ep >= 0:
            print('测试阶段:')
            print(ep)
            record_line(each_line_path, "*** Epoch: {} ***\n".format(ep))
            queue, speed, ret = [], [], []
            # Three evaluation episodes; mean/std reported below.
            for i in range(3):
                ep_r, ep_q, ep_v = [], [], []
                state = env.reset()
                state = normalize_formation(state,Agent_NUM)
                _state = [n for a in state for n in a ]
                for step in range(steps):
                    step_r = 0
                    data_collection(env, ep_v, ep_q)
                    _state = np.array(_state)
                    _actions = rnn_agent.choose_action(_state)
                    actions = np.zeros((Agent_NUM,), dtype=int)
                    rl_actions = cover_actions(_actions, actions,Agent_NUM)
                    next_state, rewards, done, _ = env.step(rl_actions)
                    if Reward_num == 0:
                        for k in range(Agent_NUM):
                            step_r += rewards[k]/Agent_NUM
                        ep_r.append(step_r)
                    else:
                        ep_r.append(rewards)
                        # NOTE(review): step_r is always 0 in this branch, so
                        # this extra append halves the reported mean return —
                        # looks like a leftover/bug; confirm intent.
                        ep_r.append(step_r)
                    state = next_state
                    state = normalize_formation(state,Agent_NUM)
                    _state = [n for a in state for n in a ]
                    _done = True
                    for i in range(Agent_NUM):
                        _done *= done["center"+str(i)]
                    if _done:
                        break
                queue.append(np.array(ep_q).mean())
                speed.append(np.array(ep_v).mean())
                ret.append(np.array(ep_r).mean())
                record_line(each_line_path, "Queue: " + str(ep_q) + "\n")
                record_line(each_line_path, "Speed: " + str(ep_v) + "\n")
                record_line(each_line_path, "Return: " + str(ep_r) + "\n")
            # record...
            print("*** Epoch: {} ***\n".format(ep))
            print("| Queue: {}, std: {} |".format(np.array(queue).mean(), np.array(queue).std()))
            print("| Speed: {}, std: {} |".format(np.array(speed).mean(), np.array(speed).std()))
            print("| Return: {}, std: {} |".format(np.array(ret).mean(), np.array(ret).std()))
            print("*****************\n")
            record_line(test_epoch_path, "*** Epoch: {} ***\n".format(ep))
            record_line(test_epoch_path, "| Queue: {}, std: {} |".format(np.array(queue).mean(), np.array(queue).std()))
            record_line(test_epoch_path, "| Speed: {}, std: {} |".format(np.array(speed).mean(), np.array(speed).std()))
            record_line(test_epoch_path, "| Return: {}, std: {} |".format(np.array(ret).mean(), np.array(ret).std()))
            record_line(test_epoch_path, "*****************\n")
| 2.71875 | 3 |
paas-ce/paas/esb/components/bk/apis/cc/add_app.py | renmcc/bk-PaaS | 767 | 12766268 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from django import forms
from common.forms import BaseComponentForm, ListField
from common.constants import API_TYPE_OP
from components.component import Component
from .toolkit import tools, configs
class AddApp(Component):
    # The class docstring below is rendered by the ESB documentation templates
    # (Jinja-style {{ }} markers with i18n) — it is behavioral text and is
    # deliberately left untouched.
    """
    apiLabel {{ _("新建业务") }}
    apiMethod POST

    ### {{ _("功能描述") }}

    {{ _("新建业务") }}

    ### {{ _("请求参数") }}

    {{ common_args_desc }}

    #### {{ _("接口参数") }}

    | {{ _("字段") }}  |  {{ _("类型") }} | {{ _("必选") }}   |  {{ _("描述") }}      |
    |-----------|------------|--------|------------|
    | app_name  |  string    | {{ _("是") }}     | {{ _("业务名") }} |
    | maintainers | string | {{ _("是") }} | {{ _("运维人员, 多个人之间用逗号分隔") }} |
    | product_pm | string | {{ _("否") }} | {{ _("产品人员,多个人之间用逗号分隔") }} |
    | developer | string | {{ _("否") }} | {{ _("开发人员,多个人之间用逗号分隔") }} |
    | tester | string | {{ _("否") }} | {{ _("测试人员,多个人之间用逗号分隔") }} |
    | operator | string | {{ _("否") }} | {{ _("操作者,多个人之间用逗号分隔") }} |
    | company_name | string | {{ _("是") }} | {{ _("公司名,cmdb配置文件中定义的constants.php中的 COMPANY_NAME") }} |
    | level | int | {{ _("是") }} | {{ _("业务拓扑级别,2或者3") }} |
    | life_cycle | string | {{ _("是") }} | {{ _("生成周期,1: 测试中, 2: 已上线, 3: 停运其中的一个值") }} |

    ### {{ _("请求参数示例") }}

    ```python
    {
        "app_code": "esb_test",
        "app_secret": "xxx",
        "bk_token": "xxx",
        "app_name": "Test",
        "maintainers": "admin",
        "product_pm": "admin",
        "company_name": "CompanyName",
        "level": 3,
        "life_cycle": "1"
    }
    ```

    ### 返回结果示例

    ```python
    {
        "result": true,
        "code": "00",
        "message": "",
        "data": {
            "appId": 25
        }
    }
    ```
    """
    # Component routing metadata: target system, API category and CC host.
    sys_name = configs.SYSTEM_NAME
    api_type = API_TYPE_OP
    host = configs.host

    # Declarative validation of the incoming request parameters.
    class Form(BaseComponentForm):
        app_name = forms.CharField(label='business name', required=True)
        maintainers = ListField(label='OPS', required=True)
        product_pm = ListField(label='PM', required=False)
        developer = ListField(label='developer', required=False)
        tester = ListField(label='test staff', required=False)
        operator = ListField(label='operator', required=False)
        company_name = forms.CharField(label='company name', required=True)
        level = forms.IntegerField(label='business topology level', required=True)
        life_cycle = forms.CharField(label='life cycle', required=True)

        def clean(self):
            # Map validated snake_case fields to the CamelCase keys the CC
            # backend expects; list fields are joined into comma strings.
            data = self.cleaned_data
            return {
                'ApplicationName': data['app_name'],
                'Maintainers': ','.join(data['maintainers']),
                'ProductPm': ','.join(data['product_pm']),
                'Developer': ','.join(data['developer']),
                'Tester': ','.join(data['tester']),
                'Operator': ','.join(data['operator']),
                'CompanyName': data['company_name'],
                'Level': data['level'],
                'LifeCycle': data['life_cycle'],
            }

    def handle(self):
        # Record the requesting user as the creator, then forward the cleaned
        # form data to the CC "addApp" endpoint and relay its response.
        self.form_data['Creator'] = self.current_user.username

        client = tools.CCClient(self)
        self.response.payload = client.post_request(
            self.host,
            '/api/app/addApp',
            data=self.form_data,
        )
| 1.6875 | 2 |
src/qbr.py | F2Binary/qbr | 0 | 12766269 | <reponame>F2Binary/qbr<filename>src/qbr.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename : qbr.py
# Author : <NAME>
# Created : Tue, 26 Jan 2016
# Last Modified : Sun, 31 Jan 2016
from sys import exit as Die
try:
import sys
import kociemba
import argparse
from combiner import combine
from video import webcam
from normalizer import normalize
except ImportError as err:
Die(err)
class Qbr:
    """Scan a cube from the webcam, solve it with kociemba, print the result."""

    def __init__(self, normalize, language):
        # ``normalize`` toggles the human-readable move-by-move explanation.
        self.humanize = normalize
        # argparse may hand the language over as a one-element list; unwrap it.
        self.language = language[0] if isinstance(language, list) else language

    def run(self):
        """Run the scan/solve/print pipeline; exits the process when done."""
        scanned = webcam.scan()
        if not scanned:
            print('\033[0;33m[QBR SCAN ERROR] Ops, you did not scan in all 6 sides.')
            print('Please try again.\033[0m')
            Die(1)

        cube_state = combine.sides(scanned)
        try:
            solution = kociemba.solve(cube_state)
            move_count = len(solution.split(' '))
        except Exception as err:
            print('\033[0;33m[QBR SOLVE ERROR] Ops, you did not scan in all 6 sides correctly.')
            print('Please try again.\033[0m')
            Die(1)

        print('-- SOLUTION --')
        print('Starting position:\n front: green\n top: white\n')
        print(solution, '({0} moves)'.format(move_count), '\n')

        if self.humanize:
            for position, sentence in enumerate(normalize.algorithm(solution, self.language)):
                print('{}. {}'.format(position + 1, sentence))
        Die(0)
if __name__ == '__main__':
    # define argument parser.
    parser = argparse.ArgumentParser()
    # -n/--normalize: also print each move as a human-readable instruction.
    parser.add_argument('-n', '--normalize', default=False, action='store_true',
                        help='Shows the solution normalized. For example "R2" would be: \
                        "Turn the right side 180 degrees".')
    # -l/--language: language of the normalized output (nargs=1 yields a list).
    parser.add_argument('-l', '--language', nargs=1, default='en',
                        help='You can pass in a single \
                        argument which will be the language for the normalization output. \
                        Default is "en".')
    args = parser.parse_args()

    # run Qbr with its arguments.
    Qbr(
        args.normalize,
        args.language
    ).run()
| 2.328125 | 2 |
KMCLib/PluginInterfaces/KMCRateCalculatorPlugin.py | txd283/vacancy-diffusion-kmclib | 1 | 12766270 | """ Module for the KMCRateCalculatorPlugin class """
# Copyright (c) 2013 <NAME>
#
# This file is part of the KMCLib project distributed under the terms of the
# GNU General Public License version 3, see <http://www.gnu.org/licenses/>.
#
import numpy
from KMCLib.Backend import Backend
from KMCLib.Exceptions.Error import Error
class KMCRateCalculatorPlugin(Backend.RateCalculator):
    """
    Class for providing an interface to easily extend and customize
    the behaviour of the calculation of individual rates in the KMC simulation.
    """

    def __init__(self):
        """
        Base class constructor.
        """
        # Call the C++ base class constructor.
        Backend.RateCalculator.__init__(self)

        # Call the custom setup.
        self.initialize()

    def backendRateCallback(self,
                            cpp_coords,
                            coords_len,
                            types_before,
                            types_after,
                            rate_constant,
                            process_number,
                            global_x,
                            global_y,
                            global_z):
        """
        Function called from C++ to get the rate. This function receives
        the data from C++ and parses it to a Python friendly format to send it
        forward to the custom rate function.
        """
        # Call and return the custom rate.
        # PERFORMME: Consider creating the numpy array in C++ if possible.
        global_coordinate = (global_x, global_y, global_z)
        # Reshape the flat C++ coordinate buffer into an Nx3 array before
        # handing it to the user-defined rate() implementation.
        return self.rate(numpy.array(cpp_coords).reshape(coords_len,3),
                         types_before,
                         types_after,
                         rate_constant,
                         process_number,
                         global_coordinate)

    def initialize(self):
        """
        Called as the last statement in the base class constructor
        to allow for custom setup of the object.
        """
        # Intentionally a no-op; subclasses may override.
        pass

    def rate(self,
             coords,
             types_before,
             types_after,
             rate_constant,
             process_number,
             global_coordinate):
        """
        Called from the base class to get the rate for a particular
        local geometry. Any class inheriting from the plugin base class
        must provide an implementation of this function.

        :param coords: The coordinates of the configuration as a Nx3 numpy array
                       in fractional units of the primitive cell.

        :param types_before: The types before the process, as tuple of strings.

        :param types_after: The types after the process, as tuple of strings.

        :param rate_constant: The rate constant associated with the process
                              to either update or replace.

        :param process_number: The process id number.

        :param global_coordinate: The global coordinate of the central index.

        :returns: The custom rate of the process. Note that the returned rate must
                  not be negative or zero.
        """
        raise Error("The rate(self,...) API function in the 'KMCRateCalculator' base class must be overloaded when using a custom rate calculator.")

    def cutoff(self):
        """
        To determine the radial cutoff of the geometry around the central
        lattice site to cut out and send down to the custom rate function.
        If not implemented by derived classes the default is to use
        the cutoff of the largest process local geometry.

        :returns: The desired cutoff in primitive cell internal coordinates.
        :rtype: float
        """
        # Returning None results in default behaviour.
        return None
| 2.265625 | 2 |
club/views.py | Tizeta2018/itc-172-web-python | 0 | 12766271 | <filename>club/views.py<gh_stars>0
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404

from .forms import ResourceForm
from .models import Meeting, MeetingMinutes, Resource, Event
# Create your views here.
def index(request):
    """Render the club landing page."""
    template_name = 'club/index.html'
    return render(request, template_name)
# List views: each fetches all objects for its model and renders the matching template.
def clubmeetings(request):
    """Render the meetings page with every Meeting record."""
    context = {'meetings_list': Meeting.objects.all()}
    return render(request, 'club/meetings.html', context)
def clubminutes(request):
    """Render the minutes page with every MeetingMinutes record."""
    context = {'minutes_list': MeetingMinutes.objects.all()}
    return render(request, 'club/minutes.html', context)
def clubresources(request):
    """Render the resources page with every Resource record."""
    context = {'resources_list': Resource.objects.all()}
    return render(request, 'club/resources.html', context)
def clubevents(request):
    """Render the events page with every Event record.

    Fixes two NameErrors in the original: the imported model is ``Event``
    (it referenced ``Events``), and the context variable was assigned as
    ``eventss_list`` but passed to the template as ``events_list``.
    """
    events_list = Event.objects.all()
    return render(request, 'club/events.html', {'events_list': events_list})
def getmeeting(request):
    """Render the meetings page (same behavior as clubmeetings)."""
    all_meetings = Meeting.objects.all()
    return render(request, 'club/meetings.html', {'meetings_list': all_meetings})
def meetingdetail(request, id):
    """Render the detail page for one Meeting, returning 404 for unknown pks.

    The original called ``get_object_or_404`` without importing it (a
    NameError at request time); the import is added to the module's
    django.shortcuts import line.
    """
    detail = get_object_or_404(Meeting, pk=id)
    context = {'detail': detail}
    return render(request, 'club/details.html', context=context)
#form view
@login_required
def newResource(request):
    """Display and process the form for creating a new Resource (login required).

    Fixes in this version:
    - ``ResourceForm`` was never imported (NameError at request time); the
      import is added at module level.
    - The initial ``form = ResourceForm`` bound the class, not an instance.
    - ``form.save(commit=True)`` already persists the object, so the extra
      ``post.save()`` was redundant.
    On a valid POST the object is saved and a fresh empty form is shown;
    an invalid POST re-renders the bound form so field errors are displayed.
    """
    if request.method == 'POST':
        form = ResourceForm(request.POST)
        if form.is_valid():
            form.save()
            form = ResourceForm()
    else:
        form = ResourceForm()
    return render(request, 'club/newresource.html', {'form': form})
| 2.015625 | 2 |
models/test.py | DimensionPrism/Federated-Learning | 0 | 12766272 | import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
def test_img(model, dataset, args):
model.eval()
test_loss = 0
correct = 0
data_loader = DataLoader(dataset, batch_size=args.batch_size)
with torch.no_grad():
for index, data in enumerate(data_loader):
images, labels = data
images, labels = images.cuda(), labels.cuda()
output = model(images)
test_loss += F.cross_entropy(output, labels, reduction="sum").item()
predicted = output.max(1, keepdim=True)[1]
correct += predicted.eq(labels.view_as(predicted)).sum().item()
accuracy = 100. * correct / len(data_loader.dataset)
return accuracy, test_loss
| 2.703125 | 3 |
ldt/tests/dicts/semantics/test_babelnet.py | Darkdragon84/ldt | 14 | 12766273 | import unittest
import os
os.environ["TESTING_LDT"] = "TRUE"
import ldt
class Tests(unittest.TestCase):
    '''
    The tests in this block inspect the BabelNet functionality: the pre-computed
    output is expected to be contained, but the module is not punished for
    new words not in the pre-computed lists - in case the dictionaries get
    updated.
    '''
    # NOTE(review): every test below is commented out — presumably because
    # they hit the live BabelNet API (key/quota required); confirm before
    # re-enabling them in automated runs.

    # def test_babelnet_initialization(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "Italian"
    #     self.assertEqual(test.language, "IT")
    #
    # def test_babelnet_ids(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "English"
    #     res = test.get_ids("cat")
    #     self.assertIn('bn:00516031n', res)
    #
    # def test_babelnet_is_word(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "English"
    #     self.assertFalse(test.is_a_word("catttttt"))
    #
    # def test_babelnet_lemmas(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "English"
    #     test.lowercasing = True
    #     res = test.get_lemmas('bn:00516031n')
    #     self.assertIn('alternative_versions_of_kitty_pryde', res)
    #
    # def test_babelnet_edges(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "English"
    #     res = test.get_edges('bn:00516031n')
    #     self.assertIn('bn:00004927n', res["other"])

    # def test_babelnet_relation(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "English"
    #     res = test.get_relation("senator", relation="hypernyms")
    #     self.assertIn('legislator', res)
    #
    # def test_babelnet_relations(self):
    #     test = ldt.dicts.semantics.BabelNet()
    #     test.language = "English"
    #     res = test.get_relations("senator", relations=("hypernyms"))
    #     self.assertIn('legislator', res["hypernyms"])
section2_change_pars_for_strong_hurricanes/Source_code_for_extracting_data/source_code_change_Clz_isftcflx_2/2_Plot_wind_intensity_time_series_errorbar.py | Sunnyfred/Atlantic_Hurricane_Simulations | 0 | 12766274 | import csv
import matplotlib as matplot
import matplotlib.pyplot as plt
import numpy as np
# List the colors that will be used for tracing the track.
colors = ['black','blue','red','green', 'cyan', \
          'gray', 'gold', 'lightcoral', 'turquoise','red','blue','green','pink']
# Line styles, marker glyphs and marker sizes applied to successive series.
patterns = ['-', '--','--','--','--','--','--','--', ':','-', '--', ':','-', '--', ':',\
            '-.', '-.', '-.', ':', '--', '-']
markers = ['.',',','o','v','8','s','+','x','X','D','^','<','>','v']
sizes = [10, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 3, 6,5,4,3,2,2]
# Path to the csv file
# NOTE(review): absolute Windows paths tie this script to a single machine.
dir1 = 'C:/Users/limgr/Desktop/Katrina_wind_intensity_8km.csv'
dir2 = 'C:/Users/limgr/Desktop/Maria_wind_intensity_8km.csv'
dir3 = 'C:/Users/limgr/Desktop/Irma_wind_intensity_8km.csv'
dir4 = 'C:/Users/limgr/Desktop/Dorian_wind_intensity_8km.csv'
dir7 = 'C:/Users/limgr/Desktop/Lorenzo_wind_intensity_8km.csv'
def _plot_intensity(csv_path, title, out_path, last=None):
    """Plot every intensity series in *csv_path* against the real track.

    The CSV header row holds the time steps; the first data row is the
    observed track in knots (converted to m/s by the 0.5144444 factor) and the
    remaining rows are simulation outputs. ``last`` optionally truncates every
    series (e.g. ``5`` keeps the first five points, ``-2`` drops the final
    two), matching the per-storm trimming of the original script. The figure
    is saved to *out_path* and shown.

    This helper replaces five verbatim-duplicated plotting sections.
    """
    rows, times, values = [], [], []
    with open(csv_path, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                print(f'Column names are {", ".join(row)}')
                times.append(list(row.keys()))
                line_count += 1
            rows.append(row)
            values.append(list(row.values()))
            line_count += 1
    print(f'Processed {line_count} lines.')

    times0 = times[0]
    print(times0)
    print(values[0])

    cut = slice(None) if last is None else slice(None, last)
    for i in range(0, line_count - 1):
        if i == 0:
            # Real track: convert knots to m/s.
            series = [float(v) * 0.5144444 for v in values[i]]
        else:
            series = [float(v) for v in values[i]]
        plt.plot(times0[cut], series[cut], color=colors[i], marker='s',
                 linestyle=patterns[i], markersize=sizes[i])

    plt.legend(["Real Track",
                "C0.0001",
                "C0.01",
                "C1",
                "C100"],
               loc="upper right",
               prop={'size': 7})
    plt.xlabel("Time Step [hr]", fontsize=14)
    plt.ylabel("Intensity", fontsize=14)
    plt.title(title, {'size': 20})
    plt.savefig(out_path)
    plt.show()


# One plot per storm; slicing matches the original per-storm trimming.
_plot_intensity(dir1, "Katrina Intensity ", 'C:/Users/limgr/Desktop/katrina_wind_intensity_A.png', last=5)
_plot_intensity(dir2, "Maria Intensity ", 'C:/Users/limgr/Desktop/maria_wind_intensity_A.png', last=5)
_plot_intensity(dir3, "Irma Intensity ", 'C:/Users/limgr/Desktop/irma_wind_intensity_A.png')
_plot_intensity(dir4, "Dorian Intensity ", 'C:/Users/limgr/Desktop/dorian_wind_intensity_A.png', last=-2)
_plot_intensity(dir7, "Lorenzo Intensity ", 'C:/Users/limgr/Desktop/lorenzo_wind_intensity_A.png')
def _load_intensity_errors(csv_path):
    """Load one intensity CSV and compute relative simulation errors.

    The first CSV row is the observed track in knots (converted to m/s via
    the 0.5144444 factor); every other row is a simulation series. Returns
    ``(times, real_track, rows, values, simu, real, simu_error)`` where
    ``simu_error = |simu - real| / real`` is broadcast over every simulation
    row (shape (1, n_sims, n_times), preserving the original indexing
    ``simu_error[0][k][:]`` used by the aggregation code below).

    This helper replaces five verbatim-duplicated loading sections.
    """
    times, real_track, rows, values = [], [], [], []
    with open(csv_path, mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file)
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                print(f'Column names are {", ".join(row)}')
                times.append(list(row.keys()))
                real_track.append(list(row.values()))
                line_count += 1
            else:
                rows.append(row)
                values.append(list(row.values()))
                line_count += 1
    print('There is totally ', (line_count - 1) * (len(row)), ' data points')

    simu = np.array(values, dtype=np.float32)
    real = np.array(real_track, dtype=np.float32)
    real = real * 0.5144444  # knots -> m/s
    simu_error = abs(simu - real[:, None]) / real[:, None]
    # NOTE(review): the label says "pressure" but the data here is wind
    # intensity — message kept verbatim from the original script.
    print('absolute pressure error')
    print(abs(simu - real[:, None]))
    return times, real_track, rows, values, simu, real, simu_error


# Placeholders preserved from the original script (defined but never filled
# in this section); kept in case later code references them.
rows5, Times5, values5 = [], [], []
rows6, Times6, values6 = [], [], []
oussama1, wrf1 = [], []
oussama2, wrf2 = [], []
oussama3, wrf3 = [], []
oussama4, wrf4 = [], []
oussama7, wrf7 = [], []

# Per-storm loads; module-level names preserved for the aggregation below.
Times1, real1_track, rows1, values1, simu1, real1, simu_error1 = _load_intensity_errors(dir1)
Times2, real2_track, rows2, values2, simu2, real2, simu_error2 = _load_intensity_errors(dir2)
Times3, real3_track, rows3, values3, simu3, real3, simu_error3 = _load_intensity_errors(dir3)
Times4, real4_track, rows4, values4, simu4, real4, simu_error4 = _load_intensity_errors(dir4)
Times7, real7_track, rows7, values7, simu7, real7, simu_error7 = _load_intensity_errors(dir7)
#ouss_all=np.append(ouss1[0][:],ouss2[0][:],ouss3[0][:],ouss4[0][:],axis=0)
#error_all=np.append(error1[0][1][:],error2[0][1][:],error3[0][1][:],error4[0][1][:], axis=0)
# Holders left over from an older per-run assignment scheme.  The
# par1..par4 arrays below are immediately rebound by np.concatenate, and
# ouss_error / wrf_error / par5_error are never used afterwards in the
# code shown here.
ouss_error=np.zeros((4, 4))
wrf_error=np.zeros((4, 4))
par1_error=np.zeros((4, 4))
par2_error=np.zeros((4, 4))
par3_error=np.zeros((4, 4))
par4_error=np.zeros((4, 4))
par5_error=np.zeros((4, 4))
# Pool the relative errors of each parameter across runs 1-4 and 7, then
# reduce to a mean and standard deviation per parameter.
# NOTE(review): run 1 keeps only its first 5 samples and run 4 drops its
# last 2 -- presumably to align record counts between runs; confirm
# against the underlying CSV files.
par1_error=np.concatenate((simu_error1[0][0][0:5],simu_error2[0][0][:],\
                           simu_error3[0][0][:],simu_error4[0][0][:-2],simu_error7[0][0][:]))
par1_error=par1_error.flatten()
par1_error_mean=np.mean(par1_error)
par1_error_std=np.std(par1_error)
par2_error=np.concatenate((simu_error1[0][1][0:5],simu_error2[0][1][:],\
                           simu_error3[0][1][:],simu_error4[0][1][:-2],simu_error7[0][1][:]))
par2_error=par2_error.flatten()
par2_error_mean=np.mean(par2_error)
par2_error_std=np.std(par2_error)
par3_error=np.concatenate((simu_error1[0][2][0:5],simu_error2[0][2][:],\
                           simu_error3[0][2][:],simu_error4[0][2][:-2],simu_error7[0][2][:]))
par3_error=par3_error.flatten()
par3_error_mean=np.mean(par3_error)
par3_error_std=np.std(par3_error)
par4_error=np.concatenate((simu_error1[0][3][0:5],simu_error2[0][3][:],\
                           simu_error3[0][3][:],simu_error4[0][3][:-2],simu_error7[0][3][:]))
par4_error=par4_error.flatten()
par4_error_mean=np.mean(par4_error)
par4_error_std=np.std(par4_error)
# Bar chart: mean relative error per configuration, std as error bars.
hurricanes = ['C0.0001', 'C0.01', 'C1', 'C100']
x_pos = np.arange(len(hurricanes))
CTEs = [par1_error_mean,par2_error_mean,\
        par3_error_mean,par4_error_mean]
errors = [par1_error_std,par2_error_std,\
          par3_error_std,par4_error_std]
fig, ax = plt.subplots()
ax.bar(x_pos, CTEs, yerr=errors, align='center', alpha=0.5, ecolor='black', capsize=10)
ax.set_ylabel('Intensity')
ax.set_xticks(x_pos)
ax.set_xticklabels(hurricanes)
ax.set_title('Hurricanes')
ax.yaxis.grid(True)
# Annotate each bar with its (rounded) mean value.
for i, v in enumerate(CTEs):
    ax.text(i, v+0.02, str(round(v, 3)), color='red', fontweight='bold')
# Save the figure and show
fig.autofmt_xdate()
plt.tight_layout()
#plt.savefig('wind_intensity_bar_plot.png')
# NOTE(review): hard-coded absolute user path; consider making configurable.
plt.savefig('C:/Users/limgr/Desktop/wind_intensity_bar_plot.png')
plt.show()
| 3.015625 | 3 |
droplets/tests/test_droplets.py | tefavidal/py-droplets | 2 | 12766275 | <filename>droplets/tests/test_droplets.py
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pytest
from scipy import integrate
from pde.grids import UnitGrid
from pde.tools.misc import skipUnlessModule
from droplets import droplets
def test_simple_droplet():
    """Check basic geometry of a concrete 2d spherical droplet."""
    drop = droplets.SphericalDroplet((1, 2), 1)
    # a unit circle has perimeter 2*pi
    assert drop.surface_area == pytest.approx(2 * np.pi)
    # the interface at angle 0 sits one radius to the right of the center
    np.testing.assert_allclose(drop.interface_position(0), [2, 2])
    np.testing.assert_allclose(drop.interface_position([0]), [[2, 2]])
    # setting the volume must round-trip
    drop.volume = 3
    assert drop.volume == pytest.approx(3)
@pytest.mark.parametrize("dim", [1, 2, 3])
def test_random_droplet(dim):
    """tests simple droplet"""
    # construct the same droplet two different ways: directly, and by
    # moving a droplet created at the origin
    pos = np.random.uniform(0, 10, dim)
    radius = np.random.uniform(2, 3)
    d1 = droplets.SphericalDroplet(pos, radius)
    d2 = droplets.SphericalDroplet(np.zeros(dim), radius)
    d2.position = pos
    assert d1.dim == dim
    assert d1.volume > 0
    assert d1.surface_area > 0
    assert d1 == d2
    # copies compare equal but are distinct objects
    d3 = d1.copy()
    assert d1 == d3
    assert d1 is not d3
    # setting the volume must round-trip
    vol = np.random.uniform(10, 30)
    d2.volume = vol
    assert d2.volume == pytest.approx(vol)
    # the phase field must be clipped to the requested [vmin, vmax] range
    # and actually attain both bounds
    f = d1.get_phase_field(UnitGrid([10] * dim), vmin=0.2, vmax=0.8, label="test")
    assert f.label == "test"
    assert np.all(f.data >= 0.2)
    assert np.all(f.data <= 0.8)
    assert np.any(f.data == 0.2)
    assert np.any(f.data == 0.8)
def test_perturbed_droplet_2d():
    """test methods of perturbed droplets in 2d"""
    d = droplets.PerturbedDroplet2D([0, 1], 1, 0.1, [0.0, 0.1, 0.2])
    # smoke test: these attributes/methods only need to evaluate
    # without raising; no values are asserted
    d.volume
    d.interface_distance(0.1)
    d.interface_position(0.1)
    d.interface_curvature(0.1)
def test_perturbed_droplet_3d():
    """test methods of perturbed droplets in 3d"""
    d = droplets.PerturbedDroplet3D([0, 1, 2], 1, 0.1, [0.0, 0.1, 0.2, 0.3])
    # smoke test: these attributes/methods only need to evaluate
    # without raising; no values are asserted
    d.volume_approx
    d.interface_distance(0.1, 0.2)
    d.interface_position(0.1, 0.2)
    d.interface_curvature(0.1, 0.2)
def test_perturbed_volume():
    """test volume calculation of perturbed droplets"""
    pos = np.random.randn(2)
    radius = 1 + np.random.random()
    amplitudes = np.random.uniform(-0.2, 0.2, 6)
    d = droplets.PerturbedDroplet2D(pos, radius, 0, amplitudes)

    def integrand(φ):
        # polar area element: integrating r(φ)²/2 over φ gives the area
        r = d.interface_distance(φ)
        return 0.5 * r ** 2

    # numeric integral of the interface must agree with the volume property
    vol = integrate.quad(integrand, 0, 2 * np.pi)[0]
    assert vol == pytest.approx(d.volume)
    # setting the volume must round-trip
    vol = np.random.uniform(1, 2)
    d.volume = vol
    assert vol == pytest.approx(d.volume)
    # an unperturbed 3d droplet reduces to the exact sphere volume
    pos = np.random.randn(3)
    radius = 1 + np.random.random()
    d = droplets.PerturbedDroplet3D(pos, radius, 0, np.zeros(7))
    assert d.volume == pytest.approx(4 * np.pi / 3 * radius ** 3)
def test_surface_area():
    """test surface area calculation of droplets"""
    # perturbed 2d droplet
    R0 = 3
    amplitudes = np.random.uniform(-1e-2, 1e-2, 6)

    # unperturbed droplets: spherical and perturbed classes must agree,
    # and the approximation must match the exact value
    d1 = droplets.SphericalDroplet([0, 0], R0)
    d2 = droplets.PerturbedDroplet2D([0, 0], R0)
    assert d1.surface_area == pytest.approx(d2.surface_area)
    assert d2.surface_area == pytest.approx(d2.surface_area_approx)

    # perturbed droplet: area must differ from the sphere, while the
    # approximation stays close to the exact perturbed value
    d1 = droplets.SphericalDroplet([0, 0], R0)
    d2 = droplets.PerturbedDroplet2D([0, 0], R0, amplitudes=amplitudes)
    assert d1.surface_area != pytest.approx(d2.surface_area)
    assert d2.surface_area == pytest.approx(d2.surface_area_approx, rel=1e-4)
def test_curvature():
    """test interface curvature calculation"""
    # spherical droplet: curvature is 1/R in every dimension
    for dim in range(1, 4):
        d = droplets.SphericalDroplet(np.zeros(dim), radius=np.random.uniform(1, 4))
        assert d.interface_curvature == pytest.approx(1 / d.radius)

    # perturbed 2d droplet
    R0 = 3
    epsilon = 0.1
    amplitudes = epsilon * np.array([0.1, 0.2, 0.3, 0.4])

    def curvature_analytical(φ):
        """analytical expression for curvature"""
        # closed-form curvature radius of the perturbed circle;
        # NOTE(review): presumably derived symbolically (e.g. CAS output)
        # for exactly these four perturbation modes -- do not edit by hand
        radius = (
            3.0
            * (
                5.0 * (40.0 + 27.0 * epsilon ** 2.0)
                + epsilon
                * (
                    40.0 * (4.0 * np.cos(2.0 * φ) + np.sin(φ))
                    + np.cos(φ) * (80.0 + 66.0 * epsilon + 240.0 * np.sin(φ))
                    - epsilon
                    * (
                        10.0 * np.cos(3.0 * φ)
                        + 21.0 * np.cos(4.0 * φ)
                        - 12.0 * np.sin(φ)
                        + 20.0 * np.sin(3.0 * φ)
                        + 72.0 * np.sin(4.0 * φ)
                    )
                )
            )
            ** (3.0 / 2.0)
            / (
                10.0
                * np.sqrt(2.0)
                * (
                    200.0
                    + 60.0
                    * epsilon
                    * (
                        2.0 * np.cos(φ)
                        + 8.0 * np.cos(2.0 * φ)
                        + np.sin(φ)
                        + 6.0 * np.sin(2.0 * φ)
                    )
                    + epsilon ** 2.0
                    * (
                        345.0
                        + 165.0 * np.cos(φ)
                        - 5.0 * np.cos(3.0 * φ)
                        - 21.0 * np.cos(4.0 * φ)
                        + 30.0 * np.sin(φ)
                        - 10.0 * np.sin(3.0 * φ)
                        - 72.0 * np.sin(4.0 * φ)
                    )
                )
            )
        )
        return 1 / radius

    d = droplets.PerturbedDroplet2D([0, 0], R0, amplitudes=amplitudes)
    φs = np.linspace(0, np.pi, 64)
    # loose tolerance: the analytic formula and the implementation only
    # agree to low order in the perturbation
    np.testing.assert_allclose(
        d.interface_curvature(φs), curvature_analytical(φs), rtol=1e-1
    )
def test_from_data():
    """Round-trip every droplet class through its `from_data` constructor."""
    examples = [
        droplets.SphericalDroplet((1,), 2),
        droplets.SphericalDroplet((1, 2), 3),
        droplets.PerturbedDroplet2D((1, 2), 3, 0.1, [0.1, 0.2]),
        droplets.PerturbedDroplet3D((1, 2, 3), 4, 0.1, [0.1, 0.2]),
    ]
    for original in examples:
        # rebuilding from the raw data must give an equal but distinct object
        restored = original.__class__.from_data(original.data)
        assert original == restored
        assert original is not restored
@skipUnlessModule("h5py")
def test_triangulation_2d():
    """test the 2d triangulation of droplets"""
    d1 = droplets.SphericalDroplet([1, 3], 5)
    d2 = droplets.PerturbedDroplet2D([2, 4], 5, amplitudes=[0.1, 0.2, 0.1, 0.2])
    for drop in [d1, d2]:
        tri = drop.get_triangulation(0.1)
        # total length of the boundary polyline must match the perimeter
        l = sum(
            np.linalg.norm(tri["vertices"][i] - tri["vertices"][j])
            for i, j in tri["lines"]
        )
        assert l == pytest.approx(drop.surface_area, rel=1e-3), drop
@skipUnlessModule("h5py")
def test_triangulation_3d():
    """test the 3d triangulation of droplets"""
    d1 = droplets.SphericalDroplet([1, 2, 3], 5)
    d2 = droplets.PerturbedDroplet3D([2, 3, 4], 5, amplitudes=[0.1, 0.2, 0.1, 0.2])
    for drop in [d1, d2]:
        tri = drop.get_triangulation(1)
        vertices = tri["vertices"]
        vol = 0
        for a, b, c in tri["triangles"]:
            # calculate the total volume by adding the volumes of the tetrahedra
            # (determinant formula with the droplet center as the 4th vertex)
            mat = np.c_[vertices[a], vertices[b], vertices[c], drop.position]
            mat = np.vstack([mat, np.ones(4)])
            vol += abs(np.linalg.det(mat) / 6)
        assert vol == pytest.approx(drop.volume, rel=0.1), drop
| 2.53125 | 3 |
netharn/api.py | Erotemic/netharn | 38 | 12766276 | <filename>netharn/api.py
"""
Newest parts of the top-level API
Concepts:
# Netharn API Concepts
TODO: documentation
"""
import ubelt as ub
import torch
from distutils.version import LooseVersion
_TORCH_IS_GE_1_2_0 = LooseVersion(torch.__version__) >= LooseVersion('1.2.0')
class Datasets(object):
    """Coercion helpers for building datasets from a configuration."""

    @staticmethod
    def coerce(config={}, **kw):
        """
        Accepts 'datasets', 'train_dataset', 'vali_dataset', and 'test_dataset'.

        Args:
            config (dict | str): coercable configuration dictionary.

        Returns:
            Dict: coco_datasets - note these are not torch datasets.
                They need to be used with ndsampler.

        Examples:
            >>> # xdoctest: +REQUIRES(module:ndsampler)
            >>> import netharn as nh
            >>> config = kw = {'datasets': 'special:shapes'}
            >>> print(ub.repr2(nh.api.Datasets.coerce(config, **kw)))
            >>> config = kw = {'datasets': 'special:shapes256'}
            >>> print(ub.repr2(nh.api.Datasets.coerce(config, **kw)))
        """
        from ndsampler import coerce_data
        # merge keyword arguments into the config (existing config keys win)
        config = _update_defaults(config, kw)
        # NOTE(review): despite the variable name, these are coco datasets
        # per the docstring above, not torch datasets
        torch_datasets = coerce_data.coerce_datasets(config)
        return torch_datasets
class Initializer(object):
    """
    Base class for all netharn initializers
    """

    def __call__(self, model, *args, **kwargs):
        # Initializers are callables that delegate to ``forward``.
        return self.forward(model, *args, **kwargs)

    def forward(self, model):
        """
        Abstract function that does the initialization
        """
        raise NotImplementedError('implement me')

    def history(self):
        """
        Initializer methods have histories which are short for algorithms and
        can be quite long for pretrained models
        """
        return None

    def get_initkw(self):
        """
        Initializer methods have histories which are short for algorithms and
        can be quite long for pretrained models
        """
        # NOTE(review): returns a shallow copy of the instance __dict__;
        # presumably every attribute is a constructor kwarg -- confirm.
        initkw = self.__dict__.copy()
        # info = {}
        # info['__name__'] = self.__class__.__name__
        # info['__module__'] = self.__class__.__module__
        # info['__initkw__'] = initkw
        return initkw

    @staticmethod
    def coerce(config={}, **kw):
        """
        Accepts 'init', 'pretrained', 'pretrained_fpath', 'leftover', and
        'noli'.

        Args:
            config (dict | str): coercable configuration dictionary.
                if config is a string it is taken as the value for "init".

        Returns:
            Tuple[nh.Initializer, dict]: initializer_ = initializer_cls, kw

        Examples:
            >>> import netharn as nh
            >>> print(ub.repr2(nh.Initializer.coerce({'init': 'noop'})))
            (
                <class 'netharn.initializers.core.NoOp'>,
                {},
            )
            >>> config = {
            ...     'init': 'pretrained',
            ...     'pretrained_fpath': '/fit/nice/untitled'
            ... }
            >>> print(ub.repr2(nh.Initializer.coerce(config)))
            (
                <class 'netharn.initializers.pretrained.Pretrained'>,
                {... 'fpath': '/fit/nice/untitled', 'leftover': None, 'mangle': True},
            )
            >>> print(ub.repr2(nh.Initializer.coerce({'init': 'kaiming_normal'})))
            (
                <class 'netharn.initializers.core.KaimingNormal'>,
                {'param': 0},
            )
        """
        import netharn as nh
        import six
        # A bare string is shorthand for {'init': <string>}.
        if isinstance(config, six.string_types):
            config = {
                'init': config,
            }
        config = _update_defaults(config, kw)
        pretrained_fpath = config.get('pretrained_fpath', config.get('pretrained', None))
        init = config.get('initializer', config.get('init', None))

        # Allow init to specify a pretrained fpath
        if isinstance(init, six.string_types) and pretrained_fpath is None:
            from os.path import exists
            pretraind_cand = ub.expandpath(init)
            if exists(pretraind_cand):
                pretrained_fpath = pretraind_cand

        # Normalize the config: a pretrained path always forces 'pretrained'.
        config['init'] = init
        config['pretrained_fpath'] = pretrained_fpath
        config['pretrained'] = pretrained_fpath

        if pretrained_fpath is not None:
            config['init'] = 'pretrained'

        # ---
        initializer_ = None
        if config['init'].lower() in ['kaiming_normal']:
            initializer_ = (nh.initializers.KaimingNormal, {
                # initialization params should depend on your choice of
                # nonlinearity in your model. See the Kaiming Paper for details.
                'param': 1e-2 if config.get('noli', 'relu') == 'leaky_relu' else 0,
            })
        elif config['init'] == 'noop':
            initializer_ = (nh.initializers.NoOp, {})
        elif config['init'] == 'pretrained':
            initializer_ = (nh.initializers.Pretrained, {
                'fpath': ub.expandpath(config['pretrained_fpath']),
                'leftover': kw.get('leftover', None),
                'mangle': kw.get('mangle', True),
                'association': kw.get('association', None),
            })
        elif config['init'] == 'cls':
            # Indicate that the model will initialize itself
            # We have to trust that the user does the right thing here.
            # NOTE(review): this branch falls through and returns None.
            pass
        else:
            raise KeyError('Unknown coercable init: {!r}'.format(config['init']))
        return initializer_
class Optimizer(object):
    """Coercion helpers for building torch (and torch_optimizer) optimizers."""

    @staticmethod
    def coerce(config={}, **kw):
        """
        Build an (optimizer_cls, optimizer_kw) pair from a configuration.

        Accepts keywords:
            optimizer / optim :
                can be sgd, adam, adamw, rmsprop

            learning_rate / lr :
                a float

            weight_decay / decay :
                a float

            momentum:
                a float, only used if the optimizer accepts it

        Returns:
            Tuple[type, dict]: the optimizer class and its keyword arguments

        Notes:
            pip install torch-optimizer

        References:
            https://datascience.stackexchange.com/questions/26792/difference-between-rmsprop-with-momentum-and-adam-optimizers
            https://github.com/jettify/pytorch-optimizer

        CommandLine:
            xdoctest -m /home/joncrall/code/netharn/netharn/api.py Optimizer.coerce

        Example:
            >>> config = {'optimizer': 'sgd'}
            >>> optim_ = Optimizer.coerce(config)

            >>> # xdoctest: +REQUIRES(module:torch_optimizer)
            >>> from netharn.api import *  # NOQA
            >>> config = {'optimizer': 'DiffGrad'}
            >>> optim_ = Optimizer.coerce(config)
            >>> print('optim_ = {!r}'.format(optim_))

            >>> config = {'optimizer': 'Yogi'}
            >>> optim_ = Optimizer.coerce(config)
            >>> print('optim_ = {!r}'.format(optim_))

            >>> from netharn.api import *  # NOQA
            >>> Optimizer.coerce({'optimizer': 'ASGD'})

        TODO:
            - [ ] https://pytorch.org/blog/stochastic-weight-averaging-in-pytorch/
        """
        import netharn as nh
        # BUGFIX: the merged config was previously discarded
        # (`_update_defaults(config, kw)` without assignment), which
        # silently ignored every keyword argument passed via **kw.
        config = _update_defaults(config, kw)
        key = config.get('optimizer', config.get('optim', 'sgd')).lower()
        lr = config.get('learning_rate', config.get('lr', 3e-3))
        decay = config.get('weight_decay', config.get('decay', 0))
        momentum = config.get('momentum', 0.9)
        # TODO: allow for "discriminative fine-tuning"
        if key == 'sgd':
            optim_ = (torch.optim.SGD, {
                'lr': lr,
                'weight_decay': decay,
                'momentum': momentum,
                'nesterov': True,
            })
        elif key == 'adam':
            optim_ = (torch.optim.Adam, {
                'lr': lr,
                'weight_decay': decay,
                # 'betas': (0.9, 0.999),
                # 'eps': 1e-8,
                # 'amsgrad': False
            })
        elif key == 'adamw':
            # Prefer the native implementation when torch is new enough.
            if _TORCH_IS_GE_1_2_0:
                from torch.optim import AdamW
                optim_ = (AdamW, {
                    'lr': lr,
                    # 'betas': (0.9, 0.999),
                    # 'eps': 1e-8,
                    # 'amsgrad': False
                })
            else:
                optim_ = (nh.optimizers.AdamW, {
                    'lr': lr,
                })
        elif key == 'rmsprop':
            optim_ = (torch.optim.RMSprop, {
                'lr': lr,
                'weight_decay': decay,
                'momentum': momentum,
                'alpha': 0.9,
            })
        else:
            # Fallback: look the name up in torch.optim or the optional
            # third-party torch_optimizer package.
            from netharn.util import util_inspect
            try:
                import torch_optimizer
            except Exception:
                torch_optimizer = None

            _lut = {}
            if torch_optimizer is not None:
                _lut.update({
                    k: c.__name__
                    for k, c in torch_optimizer._NAME_OPTIM_MAP.items()})
            _lut.update({
                k.lower(): k for k in dir(torch.optim)
                if not k.startswith('_')})

            key = _lut[key]

            cls = getattr(torch.optim, key, None)
            if cls is not None:
                # Start from the class defaults as the keyword arguments.
                # (Renamed from `kw`, which shadowed the parameter, and
                # dropped a no-op `kw.update()` call.)
                optim_kw = util_inspect.default_kwargs(cls).copy()
                optim_ = (cls, optim_kw)
            else:
                if torch_optimizer is None:
                    raise KeyError(key)
                cls = getattr(torch_optimizer, key, None)
                if cls is not None:
                    optim_kw = util_inspect.default_kwargs(cls).copy()
                    optim_ = (cls, optim_kw)
                else:
                    raise KeyError(key)
        return optim_
class Dynamics(object):
    """
    Dynamics are essentially configurations of "tricks" that can be used for
    training deep networks.
    """

    @staticmethod
    def coerce(config={}, **kw):
        """
        Build the dynamics dictionary from a config, honoring aliases.

        Kwargs:
            bstep / batch_step:
                Controls how many batches to process before taking a step in
                the gradient direction. Effectively simulates a batch_size
                that is `bstep` times bigger.

            grad_norm_max:
                clips gradients to a max value (mmdet likes to use 35 for this)

            grad_norm_type:
                p-norm to use if clipping grads.

            warmup_iters: EXPERIMENTAL
            warmup_ratio: EXPERIMENTAL

        Example:
            >>> print(Dynamics.coerce({'bstep': 2}))
            >>> print(Dynamics.coerce({'batch_step': 3}))
            >>> print(Dynamics.coerce({'grad_norm_max': 35}))
        """
        config = _update_defaults(config, kw)
        defaults = {
            'batch_step': 1,  # simulates larger batch sizes
            'grad_norm_max': None,  # clips gradients to a max value
            'grad_norm_type': 2,  # p-norm to use if clipping grads.
            'warmup_iters': 0,  # CURRENTLY HACKED AND EXPERIMENTAL
            'warmup_ratio': 1.0 / 3.0,  # CURRENTLY HACKED AND EXPERIMENTAL
        }
        aliases = {
            'batch_step': ['bstep'],
        }
        dynamics_ = {}
        for name, fallback in defaults.items():
            # the canonical key wins over any of its aliases
            candidates = [name] + aliases.get(name, [])
            dynamics_[name] = next(
                (config[c] for c in candidates if c in config), fallback)
        return dynamics_
class Scheduler(object):
    """Base interface + coercion helpers for learning-rate schedulers."""

    def step_batch(self, bx=None):
        # Subclasses advance per-batch state here.
        raise NotImplementedError

    def step_epoch(self, epoch=None):
        # Subclasses advance per-epoch state here.
        raise NotImplementedError

    @staticmethod
    def coerce(config={}, **kw):
        """
        Accepts keywords:
            scheduler / schedule
            learning_rate / lr

            for scheduler == exponential:
                gamma
                stepsize

        scheduler accepts several special strings which involves a keyword
        followed by a special coded string that can be used to modify
        parameters. Some examples:

            step-10-30-50-100 - multiply LR by 0.1 at every point

            onecycle90 - a cyclic scheduler peaking at the epoch 90 // 2

            onecycle90-p0.2 - a cyclic scheduler peaking at the int(90 * 0.2)

            ReduceLROnPlateau-p2-c2 - a ReduceLROnPlateau scheduler with
                a patience of 2 and a cooldown of 2

            Exponential-g0.98-s1 - exponential decay of 0.98 every 1-th
                epoch
        """
        import netharn as nh
        import parse
        config = _update_defaults(config, kw)
        key = config.get('scheduler', config.get('schedule', 'step90'))
        lr = config.get('learning_rate', config.get('lr', 3e-3))
        if key.startswith('onecycle'):
            # NOTE(review): keys like 'onecycle90' without a trailing '-'
            # make parse.parse return None here and crash on .fixed --
            # confirm callers always pass 'onecycle<N>-...'.
            result = parse.parse('onecycle{:d}-{}', key)
            size = result.fixed[0]
            suffix = result.fixed[1]
            parts = suffix.split('-')
            kw = {
                'peak': size // 2,
            }
            try:
                for part in parts:
                    if not part:
                        continue
                    if part.startswith('p'):
                        # 'p0.2' means peak at a fraction of the cycle;
                        # 'p45' means peak at an absolute epoch
                        valstr = part[1:]
                        if valstr.startswith('0.'):
                            kw['peak'] = int(size * float(valstr))
                        else:
                            kw['peak'] = int(valstr)
                    else:
                        raise ValueError('unknown {} part'.format(suffix))
            except Exception:
                raise ValueError('Unable to parse {} specs: {}'.format(
                    result, suffix))
            # Triangular lr ramp with a matching inverted momentum ramp.
            scheduler_ = (nh.schedulers.ListedScheduler, {
                'points': {
                    'lr': {
                        size * 0   : lr * 0.1,
                        kw['peak'] : lr * 1.0,
                        size * 1   : lr * 0.01,
                        size + 1   : lr * 0.001,
                    },
                    'momentum': {
                        size * 0   : 0.95,
                        kw['peak'] : 0.90,
                        size * 1   : 0.95,
                        size + 1   : 0.999,
                    },
                },
            })
            return scheduler_
        prefix = 'step'.lower()
        if key.lower().startswith(prefix):
            # Allow step to specify `-` separated step points
            suffix = key[len(prefix):]
            param_parts = suffix.split('-')
            # optional trailing 'f<float>' overrides the decay factor
            if param_parts and param_parts[-1].startswith('f'):
                factor = float(param_parts[-1][1:])
                param_parts = param_parts[:-1]
            else:
                factor = 10
            points = [int(p) for p in param_parts if p]
            assert sorted(points) == points, 'points must be in order'
            # divide lr by `factor` at each step point
            lr_pts = {0: lr}
            for i, epoch in enumerate(points, start=1):
                lr_pts[epoch] = lr / (factor ** i)
            scheduler_ = (nh.schedulers.ListedScheduler, {
                'points': {
                    'lr': lr_pts,
                },
                'interpolation': 'left'
            })
            return scheduler_
        prefix = 'ReduceLROnPlateau'.lower()
        if key.lower().startswith(prefix):
            # Allow specification of scheduler params
            suffix = key[len(prefix):]
            parts = suffix.split('-')
            kw = {
                'patience': 10,
                'cooldown': 0,
                'factor': 0.1,
            }
            try:
                for part in parts:
                    if not part:
                        continue
                    if part.startswith('f'):
                        kw['factor'] = float(part[1:])
                    elif part.startswith('p'):
                        kw['patience'] = int(part[1:])
                    elif part.startswith('c'):
                        kw['cooldown'] = int(part[1:])
                    else:
                        raise ValueError('unknown {} part'.format(prefix))
            except Exception:
                raise ValueError('Unable to parse {} specs: {}'.format(
                    prefix, suffix))
            scheduler_ = (torch.optim.lr_scheduler.ReduceLROnPlateau, kw)
            return scheduler_
        prefix = 'Exponential'.lower()
        if key.lower().startswith(prefix):
            # Allow specification of scheduler params
            suffix = key[len(prefix):]
            parts = suffix.split('-')
            kw = {
                'gamma': config.get('gamma', 0.1),
                'stepsize': config.get('stepsize', 100),
            }
            try:
                for part in parts:
                    if not part:
                        continue
                    if part.startswith('g'):
                        kw['gamma'] = float(part[1:])
                    elif part.startswith('s'):
                        kw['stepsize'] = int(part[1:])
                    else:
                        raise ValueError('unknown {} part'.format(prefix))
            except Exception:
                raise ValueError('Unable to parse {} specs: {}'.format(
                    prefix, suffix))
            scheduler_ = (nh.schedulers.Exponential, kw)
            return scheduler_
        raise KeyError(key)
class Loaders(object):
    """Coercion helpers for building torch DataLoaders from datasets."""

    @staticmethod
    def coerce(datasets, config={}, **kw):
        """
        Build one DataLoader per dataset split.

        Datasets providing a ``make_loader`` method build their own loader;
        everything else is wrapped in a vanilla torch DataLoader.  Only the
        'train' split is shuffled.
        """
        config = _update_defaults(config, kw)
        common = dict(
            batch_size=config['batch_size'],
            num_workers=config['workers'],
            pin_memory=True,
        )
        loaders = {}
        for tag, dset in datasets.items():
            shuffle = (tag == 'train')
            if hasattr(dset, 'make_loader'):
                loaders[tag] = dset.make_loader(shuffle=shuffle, **common)
            else:
                loaders[tag] = torch.utils.data.DataLoader(
                    dset, shuffle=shuffle, **common)
        return loaders
class Criterion(object):
    """Placeholder for criterion (loss function) coercion helpers."""

    @staticmethod
    def coerce(config={}, **kw):
        """
        Accepts keywords:
            criterion / loss - one of: (
                contrastive, focal, triplet, cross_entropy, mse)
        """
        # Not implemented yet; kept so the API mirrors the other coercers.
        raise NotImplementedError
def configure_hacks(config={}, **kw):
    """
    Configures hacks to fix global settings in external modules

    Args:
        config (dict): expected to contain the key "workers" with an
           integer value equal to the number of dataloader processes.

        **kw: can also be used to specify config items

    Modules we currently hack:
        * cv2 - fix thread count
    """
    config = _update_defaults(config, kw)
    # NOTE(review): raises KeyError when neither config nor kw supplies
    # 'workers' -- confirm callers always pass it.
    if config['workers'] > 0:
        # Stop cv2 from spawning its own threads, which misbehaves when
        # cv2 is used inside dataloader worker processes.
        import cv2
        cv2.setNumThreads(0)
def configure_workdir(config={}, **kw):
    """Expand and create the working directory named by config/kw.

    Returns:
        str: the expanded workdir path (also written back into ``config``).
    """
    config = _update_defaults(config, kw)
    if config['workdir'] is None:
        # NOTE(review): if 'workdir' came from kw this lookup is redundant,
        # and it raises KeyError when kw lacks 'workdir' -- confirm intent.
        config['workdir'] = kw['workdir']
    workdir = config['workdir'] = ub.expandpath(config['workdir'])
    ub.ensuredir(workdir)
    return workdir
def _update_defaults(config, kw):
config = dict(config)
for k, v in kw.items():
if k not in config:
config[k] = v
return config
| 2.515625 | 3 |
dsopz/oauth.py | artooro/dsopz | 34 | 12766277 | import oauth_local
import oauth_installed
import oauth_gce
import oauth_serviceaccount
import sys
import time
import http
import json
import oauth_base
class Error(Exception):
    """Base exception for oauth helper failures."""
def resolve(t):
    """Map a handler name to its oauth implementation module."""
    handlers = {
        'installed': oauth_installed,
        'local': oauth_local,
        'gce': oauth_gce,
        'serviceaccount': oauth_serviceaccount,
    }
    try:
        return handlers[t]
    except KeyError:
        raise Error('Unknown: %s' % (t))
def get_token():
    """Return a valid OAuth access token, refreshing it when near expiry.

    Raises Error when no credentials are stored (before or after refresh).
    """
    auth = oauth_base.read_file()
    if not auth:
        raise Error('You need to login')
    now = int(time.time())  # FIX: was a redundant double assignment (now = now = ...)
    handler = resolve(auth['handler'])
    if now > auth['expires'] - 60:
        # Refresh 60s before the reported expiry to avoid using a token
        # that dies mid-request.
        handler.refresh_token(auth)
        auth = oauth_base.read_file()
        if not auth:
            raise Error('You need to login')
    return auth['access_token']
def oauth_req_json(method, url, params = None, headers = {}, expects = [200]):
    # Synchronous wrapper: issue the authorized JSON request and block on
    # the response.
    # NOTE(review): the mutable default ``headers={}`` is shared between
    # calls; the async helper historically mutated it -- prefer None.
    return oauth_async_req_json(method, url, params, headers, expects).resp()
def oauth_async_req_json(method, url, params = None, headers = None, expects = [200]):
    """Start an authorized asynchronous JSON request and return its handle.

    Adds a Bearer Authorization header; when ``params`` is given it is
    JSON-encoded and a JSON Content-type header is set.
    """
    # BUGFIX: the old signature used the mutable default ``headers={}``
    # and mutated it in place, so Authorization/Content-type entries
    # leaked between calls (and into callers' dicts).  Copy instead.
    headers = dict(headers) if headers else {}
    headers['Authorization'] = 'Bearer %s' % get_token()
    if params:
        params = json.dumps(params)
        headers['Content-type'] = 'application/json; charset=UTF-8'
    return http.async_req_json(method, url, params, headers, expects)
def argparse_prepare(sub):
    """Register CLI arguments for this subcommand (none are needed)."""
def argparse_exec(args):
    # Print the current access token (Python 2 print statement).
    print get_token()
def __main():
    # Entry point when run directly: emit a usable access token.
    print get_token()

if __name__ == '__main__':
    __main()
| 2.609375 | 3 |
utils/utilities.py | cookielanguage/IDE | 3 | 12766278 | <reponame>cookielanguage/IDE
def center(win, width=100, height=100):
    """Resize *win* to ``width`` x ``height`` and center it on the screen.

    The window-manager frame width and titlebar height (derived from the
    winfo offsets) are added to the requested client size so that the
    *outer* window is what gets centered.
    """
    win.update_idletasks()  # make sure the winfo_* values are current
    frm_width = win.winfo_rootx() - win.winfo_x()
    win_width = width + 2 * frm_width
    # FIX: removed the pointless self-assignments ``width = width`` and
    # ``height = height`` from the original.
    titlebar_height = win.winfo_rooty() - win.winfo_y()
    win_height = height + titlebar_height + frm_width
    x = win.winfo_screenwidth() // 2 - win_width // 2
    y = win.winfo_screenheight() // 2 - win_height // 2
    win.geometry('{}x{}+{}+{}'.format(width, height, x, y))
    win.deiconify()
admin_page.py | Codemonk-adi/Billing | 0 | 12766279 | <reponame>Codemonk-adi/Billing
from tkinter import Button, Entry, Label, LabelFrame, Listbox
from tkinter.constants import END, OUTSIDE, RAISED
import pages
from database import Database
from helpers import *
class page_admin(pages.Page):
    """Page to add aliases and do other admin stuff."""

    def __init__(self):
        super().__init__()
        self.db = Database()
        # Frame holding the category/alias management widgets.
        F1 = LabelFrame(self,text="Aliases",relief='raised',bg='#f8edeb' , fg='#264653', font=("Calibri",12,"bold"))
        F1.place(x=0,y=0,relwidth=1,relheight=0.4)
        Label(F1, text="Category",bg='#f8edeb' , fg='#264653' , font=("Calibri", 15, "bold")).grid(
            row=0, column=0, padx=20)
        self.category_en = Entry(F1)
        self.category_en.grid(row=1, column=0, ipady=4, ipadx=30, pady=2)
        # Autocomplete over known categories; selecting one calls
        # fill_alias to list that category's aliases.
        AC=AutocompleteCombobox(self.category_en,self.fill_alias)
        AC.set_completion_list(self.db.get_category_lists_s)
        Label(F1, text="Alias",bg='#f8edeb' , fg='#264653' , font=("Calibri", 15, "bold")).grid(
            row=0, column=1, padx=20)
        self.aliases_en = Entry(F1)
        self.aliases_en.grid(row=1, column=1, ipady=4, ipadx=30, pady=2)
        # Listbox anchored just below the alias entry, showing the aliases
        # of the currently selected category.
        self.aliases_en_l = Listbox(F1)
        self.aliases_en_l.place(in_=self.aliases_en,relx=0,rely=1,relwidth=1,y=5)
        add_alias_btn = Button(F1,text='Add Alias',bg='#f8edeb' , fg='#264653', font=("lucida", 12, "bold"), bd=7, relief=RAISED,command=self.add_alias)
        add_alias_btn.grid(row=2,column=2,ipady=4, ipadx=30, pady=2)
        add_cat_btn = Button(F1,text='Add Category',bg='#f8edeb' , fg='#264653', font=("lucida", 12, "bold"), bd=7, relief=RAISED,command=self.add_category)
        add_cat_btn.grid(row=3,column=2,ipady=4, ipadx=30, pady=2)

    def add_alias(self):
        # Persist the typed alias for the currently selected category, then
        # refresh the listbox.
        # NOTE(review): assumes fill_alias() ran first to set self._id.
        self.db.insert_alias(self._id,self.aliases_en.get())
        self.fill_alias(self._id)

    def fill_alias(self,_id):
        # Remember the selected category id and repopulate its alias list.
        self._id = _id
        aliases = self.db.get_aliases(_id)
        self.aliases_en_l.delete(0,END)
        for alias in aliases:
            self.aliases_en_l.insert(END,alias)

    def add_category(self):
        # Create a new category from the entry text.
        self.db.insert_categories(self.category_en.get())
src/commands.py | lenkozina/junior | 0 | 12766280 | <reponame>lenkozina/junior
import click
from flask.cli import with_appcontext
from src.extensions import db
from src.qa.models import Answer, Chapter, Question, Section
from src.user import User
from src.uttils import load_fixture
@click.command()
@with_appcontext
def clear_questions():
    """Clear all Sections, Chapters, Questions."""
    # Delete children before parents so foreign-key references stay valid.
    Answer.query.delete()
    Question.query.delete()
    Chapter.query.delete()
    Section.query.delete()
    db.session.commit()
    print('DB cleared successfully')  # noqa TOO1
@click.command()
@click.argument('fixture_name')
@with_appcontext
def load_section_questions(fixture_name: str):
    """
    Load selected Section, related Chapters and Questions.

    Requires YML filename without '-questions.yml'

    Example:
        flask load-section-questions bars
    """
    fixtures: dict = load_fixture(f'{fixture_name}-questions.yml')
    # Insert chapters, questions and sections from the fixture, then
    # commit everything in one transaction.
    db.session.add_all(
        Chapter(**chapter_fixture)
        for chapter_fixture in fixtures['chapters']
    )
    db.session.add_all(
        Question(**question_fixture)
        for question_fixture in fixtures['questions']
    )
    db.session.add_all(
        Section(**section_fixture)
        for section_fixture in fixtures['sections']
    )
    db.session.commit()
    print(f'{fixture_name}: chapters and questions successful load in database.')  # noqa TOO1
@click.option('-l', '--login')
@click.option('-p', '--password')
@click.option('-e', '--email')
@click.command()
@with_appcontext
def create_admin_user(login, password, email):
    """Create an approved superuser account from the CLI options."""
    # Store only the hashed password, never the plaintext.
    password = User.hash_password(password)
    user = User(
        login=login,
        email=email,
        password=password.decode(),
        is_superuser=True,
        # NOTE(review): 'is_aproved' presumably mirrors a (misspelled)
        # model field name -- confirm before renaming.
        is_aproved=True,
    )
    User.save(user)
    print(f"Admin user created! {login} {email}.")  # noqa TOO1
| 2.453125 | 2 |
notes/OOBall/ooballdemo.py | KRHS-GameProgramming-2015/Manpac | 0 | 12766281 | <filename>notes/OOBall/ooballdemo.py
# Demo: spawn bouncing "ghost" Ball sprites at a fixed interval and let
# them collide with each other and with the screen edges.
import sys, pygame, math, random
from Ball import *

pygame.init()
clock = pygame.time.Clock()
width = 800
height = 600
size = width, height
bgColor = r,g,b = 0, 0, 0
screen = pygame.display.set_mode(size)
balls = []
ballTimer = 0
ballTimerMax = .5 * 60  # spawn every half second at 60 FPS
ballImages = ["An Actual Ghost 3.png",
              "An Actual Ghost 2.png",
              "An Actual Ghost.png",
              "An Actual Ghost 4.png"]
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
    # Spawn a new ball with a random image, speed, and position whenever
    # the timer elapses.
    ballTimer += 1
    if ballTimer >= ballTimerMax:
        ballTimer = 0
        ballSpeed = [random.randint(-5, 5),
                     random.randint(-5, 5)]
        ballPos = [random.randint(100, width-100),
                   random.randint(100, height-100)]
        ballImage = ballImages[random.randint(0, len(ballImages)-1)]
        balls += [Ball(ballImage, ballSpeed, ballPos)]
    # Advance every ball and bounce it off the screen edges.
    for ball in balls:
        ball.move()
        ball.collideScreen(size)
    # Pairwise ball-vs-ball collisions (each ordered pair is checked).
    for first in balls:
        for second in balls:
            if first != second:
                first.collideBall(second)
    # Redraw the frame and cap the loop at 60 FPS.
    bgColor = r,g,b
    screen.fill(bgColor)
    for ball in balls:
        screen.blit(ball.image, ball.rect)
    pygame.display.flip()
    clock.tick(60)
tests/test_schema.py | ketgo/marshmallow-pyspark | 7 | 12766282 | """
Unit tests for Schema
"""
import datetime
import json
import pytest
from marshmallow import fields
from pyspark.sql.types import *
from pyspark.sql import Row
from marshmallow_pyspark.constants import *
from marshmallow_pyspark.schema import Schema, _RowValidator
def test_create():
    """A bare Schema starts out with the library defaults."""
    instance = Schema()
    assert instance.error_column_name == DEFAULT_ERRORS_COLUMN
    assert instance.split_errors == DEFAULT_SPLIT_ERRORS
@pytest.mark.parametrize("ma_field, spark_field", [
    (fields.String(), StringType()),
    (fields.DateTime(), TimestampType()),
    (fields.Date(), DateType()),
    (fields.Boolean(), BooleanType()),
    (fields.Integer(), IntegerType()),
    (fields.Number(), DoubleType()),
    (fields.List(fields.String()), ArrayType(StringType())),
    (fields.Nested(Schema.from_dict({"name": fields.String()})), StructType([StructField("name", StringType())]))
])
def test_spark_schema(ma_field, spark_field):
    """Each marshmallow field maps to the expected Spark type, and the
    errors column is appended to the generated spark schema."""
    class TestSchema(Schema):
        test_column = ma_field

    spark_schema = StructType(
        [
            StructField("test_column", spark_field, nullable=True),
            StructField(DEFAULT_ERRORS_COLUMN, StringType(), nullable=True)
        ]
    )
    schema = TestSchema()
    assert schema.spark_schema == spark_schema
@pytest.mark.parametrize("schema, input_data, valid_rows, invalid_rows", [
    # Case 1: scalar coercions (int/float/bool) -- bad literals become errors.
    (
            Schema.from_dict({
                "name": fields.String(required=True),
                "age": fields.Integer(required=True),
                "expenses": fields.Float(required=True),
                "employed": fields.Boolean(required=True)
            }),
            [
                {"name": "valid_1", "age": "40", "expenses": "43.5", "employed": "True"},
                {"name": "valid_2", "age": "32", "expenses": "30.5", "employed": "False"},
                {"name": "invalid_1", "age": "32.05", "expenses": "30.5", "employed": "False"},
                {"name": "invalid_2", "age": "32", "expenses": "thirty", "employed": "False"},
                {"name": "invalid_3", "age": "32", "expenses": "30.5", "employed": "Fa"},
            ],
            [
                {"name": "valid_1", "age": 40, "expenses": 43.5, "employed": True},
                {"name": "valid_2", "age": 32, "expenses": 30.5, "employed": False},
            ],
            [
                {"name": "invalid_1", "age": "32.05", "expenses": "30.5", "employed": "False"},
                {"name": "invalid_2", "age": "32", "expenses": "thirty", "employed": "False"},
                {"name": "invalid_3", "age": "32", "expenses": "30.5", "employed": "Fa"},
            ]
    ),
    # Case 2: date/datetime parsing -- format mismatches become errors.
    (
            Schema.from_dict({
                "name": fields.String(required=True),
                "date": fields.Date(required=True),
                "date_time": fields.DateTime(required=True),
            }),
            [
                {"name": "valid_1", "date": "1970-10-15", "date_time": "1970-10-15 01:00:00"},
                {"name": "invalid_1", "date": "1970-10-15 00:00:00", "date_time": "1970-10-15"},
            ],
            [
                {"name": "valid_1",
                 "date": datetime.date(1970, 10, 15),
                 "date_time": datetime.datetime(1970, 10, 15, 1, 0)},
            ],
            [
                {"name": "invalid_1", "date": "1970-10-15 00:00:00", "date_time": "1970-10-15"},
            ]
    ),
    # Case 3: nested schema -- errors inside the nested struct propagate up.
    (
            Schema.from_dict({
                "name": fields.String(required=True),
                "book": fields.Nested(
                    Schema.from_dict({
                        "author": fields.String(required=True),
                        "title": fields.String(required=True),
                        "cost": fields.Number(required=True)
                    })
                )
            }),
            [
                {"name": "valid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": "32.5"}},
                {"name": "invalid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": "32a"}},
            ],
            [
                {"name": "valid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": 32.5}},
            ],
            [
                {"name": "invalid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": "32a"}},
            ]
    )
])
def test_validate_df(spark_session, schema, input_data, valid_rows, invalid_rows):
    """validate_df with the default split mode: valid rows come back in the
    first frame; invalid rows end up JSON-encoded in the errors frame."""
    input_df = spark_session.createDataFrame(input_data)
    # Test with split
    valid_df, errors_df = schema().validate_df(input_df)
    _valid_rows = [row.asDict(recursive=True) for row in valid_df.collect()]
    assert _valid_rows == valid_rows
    # Each error cell is a JSON document {"row": ..., "errors": ...}.
    error_rows = [json.loads(row[DEFAULT_ERRORS_COLUMN]) for row in errors_df.collect()]
    assert [row["row"] for row in error_rows] == invalid_rows
# NOTE(review): this parametrize table duplicates the one on
# test_validate_df above; consider hoisting it to a shared module constant.
@pytest.mark.parametrize("schema, input_data, valid_rows, invalid_rows", [
    # Case 1: scalar coercions (int/float/bool).
    (
            Schema.from_dict({
                "name": fields.String(required=True),
                "age": fields.Integer(required=True),
                "expenses": fields.Float(required=True),
                "employed": fields.Boolean(required=True)
            }),
            [
                {"name": "valid_1", "age": "40", "expenses": "43.5", "employed": "True"},
                {"name": "valid_2", "age": "32", "expenses": "30.5", "employed": "False"},
                {"name": "invalid_1", "age": "32.05", "expenses": "30.5", "employed": "False"},
                {"name": "invalid_2", "age": "32", "expenses": "thirty", "employed": "False"},
                {"name": "invalid_3", "age": "32", "expenses": "30.5", "employed": "Fa"},
            ],
            [
                {"name": "valid_1", "age": 40, "expenses": 43.5, "employed": True},
                {"name": "valid_2", "age": 32, "expenses": 30.5, "employed": False},
            ],
            [
                {"name": "invalid_1", "age": "32.05", "expenses": "30.5", "employed": "False"},
                {"name": "invalid_2", "age": "32", "expenses": "thirty", "employed": "False"},
                {"name": "invalid_3", "age": "32", "expenses": "30.5", "employed": "Fa"},
            ]
    ),
    # Case 2: date/datetime parsing.
    (
            Schema.from_dict({
                "name": fields.String(required=True),
                "date": fields.Date(required=True),
                "date_time": fields.DateTime(required=True),
            }),
            [
                {"name": "valid_1", "date": "1970-10-15", "date_time": "1970-10-15 01:00:00"},
                {"name": "invalid_1", "date": "1970-10-15 00:00:00", "date_time": "1970-10-15"},
            ],
            [
                {"name": "valid_1",
                 "date": datetime.date(1970, 10, 15),
                 "date_time": datetime.datetime(1970, 10, 15, 1, 0)},
            ],
            [
                {"name": "invalid_1", "date": "1970-10-15 00:00:00", "date_time": "1970-10-15"},
            ]
    ),
    # Case 3: nested schema.
    (
            Schema.from_dict({
                "name": fields.String(required=True),
                "book": fields.Nested(
                    Schema.from_dict({
                        "author": fields.String(required=True),
                        "title": fields.String(required=True),
                        "cost": fields.Number(required=True)
                    })
                )
            }),
            [
                {"name": "valid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": "32.5"}},
                {"name": "invalid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": "32a"}},
            ],
            [
                {"name": "valid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": 32.5}},
            ],
            [
                {"name": "invalid_1", "book": {"author": "Sam", "title": "Sam's Book", "cost": "32a"}},
            ]
    )
])
def test_validate_df_no_split(spark_session, schema, input_data, valid_rows, invalid_rows):
    """validate_df with split_errors=False: no separate errors frame is
    produced and valid rows carry a null errors column instead."""
    input_df = spark_session.createDataFrame(input_data)
    # Test without split
    valid_df, errors_df = schema(split_errors=False).validate_df(input_df)
    assert errors_df is None
    _valid_rows = [row.asDict(recursive=True) for row in valid_df.collect()]
    # Valid rows keep the errors column, set to None.
    for row in valid_rows:
        row[DEFAULT_ERRORS_COLUMN] = None
    # Membership check (not equality): row order is not guaranteed here.
    assert all(row in _valid_rows for row in valid_rows)
def test_add_duplicate_counts(spark_session):
    """_add_duplicate_counts appends one ``__count__<cols>`` occurrence
    counter per UNIQUE constraint (single column, compound, or several
    independent columns).

    NOTE(review): the exact row order asserted below depends on Spark's
    window/partition ordering being deterministic for local runs -- confirm
    if these assertions flake on other Spark versions.
    """
    # Single unique column test
    input_data = [
        {"title": "valid_1", "release_date": "2020-1-10"},
        {"title": "invalid_1", "release_date": "2020-1-11"},
        {"title": "invalid_1", "release_date": "2020-31-11"},
        {"title": "invalid_2", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        UNIQUE = ["title"]
        title = fields.Str()
        release_date = fields.Date()

    df = TestSchema()._add_duplicate_counts(input_df)
    rows = [row.asDict(recursive=True) for row in df.collect()]
    # The second "invalid_1" row gets count 2.
    assert rows == [
        {'release_date': '2020-1-11', 'title': 'invalid_1', '__count__title': 1},
        {'release_date': '2020-31-11', 'title': 'invalid_1', '__count__title': 2},
        {'release_date': '2020-1-51', 'title': 'invalid_2', '__count__title': 1},
        {'release_date': '2020-1-10', 'title': 'valid_1', '__count__title': 1}
    ]
    # Compound unique column test
    input_data = [
        {"title": "valid_1", "release_date": "2020-1-10"},
        {"title": "invalid_1", "release_date": "2020-1-11"},
        {"title": "invalid_1", "release_date": "2020-31-11"},
        {"title": "invalid_2", "release_date": "2020-1-51"},
        {"title": "invalid_2", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        # A list-of-lists means the pair (title, release_date) must be unique.
        UNIQUE = [["title", "release_date"]]
        title = fields.Str()
        release_date = fields.Date()

    df = TestSchema()._add_duplicate_counts(input_df)
    rows = [row.asDict(recursive=True) for row in df.collect()]
    assert rows == [
        {'release_date': '2020-1-11', 'title': 'invalid_1', '__count__title~release_date': 1},
        {'release_date': '2020-31-11', 'title': 'invalid_1', '__count__title~release_date': 1},
        {'release_date': '2020-1-51', 'title': 'invalid_2', '__count__title~release_date': 1},
        {'release_date': '2020-1-51', 'title': 'invalid_2', '__count__title~release_date': 2},
        {'release_date': '2020-1-10', 'title': 'valid_1', '__count__title~release_date': 1}
    ]
    # Multiple unique columns test
    input_data = [
        {"title": "valid_1", "release_date": "2020-1-10"},
        {"title": "invalid_1", "release_date": "2020-1-11"},
        {"title": "invalid_1", "release_date": "2020-31-11"},
        {"title": "invalid_2", "release_date": "2020-1-51"},
        {"title": "invalid_2", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        # A flat list means each column is independently unique: two counters.
        UNIQUE = ["title", "release_date"]
        title = fields.Str()
        release_date = fields.Date()

    df = TestSchema()._add_duplicate_counts(input_df)
    rows = [row.asDict(recursive=True) for row in df.collect()]
    assert rows == [
        {'release_date': '2020-1-10', 'title': 'valid_1', '__count__title': 1, '__count__release_date': 1},
        {'release_date': '2020-1-11', 'title': 'invalid_1', '__count__title': 1, '__count__release_date': 1},
        {'release_date': '2020-1-51', 'title': 'invalid_2', '__count__title': 1, '__count__release_date': 1},
        {'release_date': '2020-1-51', 'title': 'invalid_2', '__count__title': 2, '__count__release_date': 2},
        {'release_date': '2020-31-11', 'title': 'invalid_1', '__count__title': 2, '__count__release_date': 1}
    ]
def test_validate_df_with_duplicates(spark_session):
    """validate_df rejects rows violating a UNIQUE constraint with a
    '["duplicate row"]' error, alongside ordinary field validation errors.
    The internal ``__count__*`` helper columns leak into the error payload.
    """
    # Single unique column test
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10"},
        {"title": "title_2", "release_date": "2020-1-11"},
        {"title": "title_2", "release_date": "2020-3-11"},
        {"title": "title_3", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        UNIQUE = ["title"]
        title = fields.Str()
        release_date = fields.Date()

    valid_df, errors_df = TestSchema().validate_df(input_df)
    valid_rows = [row.asDict(recursive=True) for row in valid_df.collect()]
    error_rows = [row.asDict(recursive=True) for row in errors_df.collect()]
    # First occurrence of a duplicate key is kept; the second is rejected.
    assert valid_rows == [
        {'title': 'title_1', 'release_date': datetime.date(2020, 1, 10)},
        {'title': 'title_2', 'release_date': datetime.date(2020, 1, 11)}
    ]
    assert error_rows == [
        {'_errors': '{"row": {"release_date": "2020-3-11", "title": "title_2", "__count__title": 2}, '
                    '"errors": ["duplicate row"]}'},
        {'_errors': '{"row": {"release_date": "2020-1-51", "title": "title_3", "__count__title": 1}, '
                    '"errors": {"release_date": ["Not a valid date."]}}'}
    ]
    # Compound unique column test
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10"},
        {"title": "title_2", "release_date": "2020-1-11"},
        {"title": "title_2", "release_date": "2020-3-11"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_4", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        # Only the full (title, release_date) pair needs to be unique.
        UNIQUE = [["title", "release_date"]]
        title = fields.Str()
        release_date = fields.Date()

    valid_df, errors_df = TestSchema().validate_df(input_df)
    valid_rows = [row.asDict(recursive=True) for row in valid_df.collect()]
    error_rows = [row.asDict(recursive=True) for row in errors_df.collect()]
    assert valid_rows == [
        {'title': 'title_1', 'release_date': datetime.date(2020, 1, 10)},
        {'title': 'title_2', 'release_date': datetime.date(2020, 1, 11)},
        {'title': 'title_2', 'release_date': datetime.date(2020, 3, 11)},
        {'title': 'title_3', 'release_date': datetime.date(2020, 1, 21)}
    ]
    assert error_rows == [
        {'_errors': '{"row": {"release_date": "2020-1-21", "title": "title_3", "__count__title~release_date": 2}, '
                    '"errors": ["duplicate row"]}'},
        {'_errors': '{"row": {"release_date": "2020-1-51", "title": "title_4", "__count__title~release_date": 1}, '
                    '"errors": {"release_date": ["Not a valid date."]}}'}
    ]
    # Multiple unique columns test
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10"},
        {"title": "title_2", "release_date": "2020-1-11"},
        {"title": "title_2", "release_date": "2020-3-11"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_4", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        # Each column independently unique: repeating either value rejects.
        UNIQUE = ["title", "release_date"]
        title = fields.Str()
        release_date = fields.Date()

    valid_df, errors_df = TestSchema().validate_df(input_df)
    valid_rows = [row.asDict(recursive=True) for row in valid_df.collect()]
    error_rows = [row.asDict(recursive=True) for row in errors_df.collect()]
    assert valid_rows == [
        {'title': 'title_1', 'release_date': datetime.date(2020, 1, 10)},
        {'title': 'title_2', 'release_date': datetime.date(2020, 1, 11)},
        {'title': 'title_3', 'release_date': datetime.date(2020, 1, 21)}
    ]
    assert error_rows == [
        {'_errors': '{"row": {"release_date": "2020-1-21", "title": "title_3", '
                    '"__count__title": 2, "__count__release_date": 2}, '
                    '"errors": ["duplicate row"]}'},
        {'_errors': '{"row": {"release_date": "2020-1-51", "title": "title_4", '
                    '"__count__title": 1, "__count__release_date": 1}, '
                    '"errors": {"release_date": ["Not a valid date."]}}'},
        {'_errors': '{"row": {"release_date": "2020-3-11", "title": "title_2", '
                    '"__count__title": 2, "__count__release_date": 1}, '
                    '"errors": ["duplicate row"]}'}
    ]
def test_validate_df_invalid_unique(spark_session):
    """A UNIQUE entry naming a column that does not exist in the schema makes
    validate_df raise ValueError, for all three UNIQUE spellings."""
    # Single unique column test
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10"},
        {"title": "title_2", "release_date": "2020-1-11"},
        {"title": "title_2", "release_date": "2020-3-11"},
        {"title": "title_3", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        UNIQUE = ["title_fake"]  # not a schema field
        title = fields.Str()
        release_date = fields.Date()

    with pytest.raises(ValueError):
        TestSchema().validate_df(input_df)
    # Compound unique column test
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10"},
        {"title": "title_2", "release_date": "2020-1-11"},
        {"title": "title_2", "release_date": "2020-3-11"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_4", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        UNIQUE = [["title", "date"]]  # "date" is not a schema field
        title = fields.Str()
        release_date = fields.Date()

    with pytest.raises(ValueError):
        TestSchema().validate_df(input_df)
    # Multiple unique columns test
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10"},
        {"title": "title_2", "release_date": "2020-1-11"},
        {"title": "title_2", "release_date": "2020-3-11"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_3", "release_date": "2020-1-21"},
        {"title": "title_4", "release_date": "2020-1-51"},
    ]
    input_df = spark_session.createDataFrame(input_data)

    class TestSchema(Schema):
        UNIQUE = ["title", "_date"]  # "_date" is not a schema field
        title = fields.Str()
        release_date = fields.Date()

    with pytest.raises(ValueError):
        TestSchema().validate_df(input_df)
def test_row_validator():
    """_RowValidator.validate_row deserializes a Row dict-style: good rows
    come back as plain dicts, bad rows as a single JSON error cell.

    The ``timestamp`` field uses ``fields.Raw(spark_type=...)``, i.e. it is
    passed through untouched for valid rows but stringified inside the error
    payload for invalid ones.
    """
    input_data = [
        {"title": "valid_1", "release_date": "2020-1-10", "timestamp": datetime.datetime(2021, 5, 5)},
        {"title": "valid_2", "release_date": "2020-1-11", "timestamp": datetime.datetime(2021, 5, 5)},
        {"title": "invalid_1", "release_date": "2020-31-11", "timestamp": datetime.datetime(2021, 5, 5)},
        {"title": "invalid_2", "release_date": "2020-1-51", "timestamp": datetime.datetime(2021, 5, 5)},
    ]

    class TestSchema(Schema):
        title = fields.Str()
        release_date = fields.Date()
        timestamp = fields.Raw(spark_type=DateType())

    # Empty list: no UNIQUE constraints for this validator.
    validator = _RowValidator(TestSchema(), DEFAULT_ERRORS_COLUMN, [])
    validated_data = [validator.validate_row(Row(**x)) for x in input_data]
    # Decode the JSON error cells so they can be compared structurally.
    for row in validated_data:
        if '_errors' in row:
            row['_errors'] = json.loads(row['_errors'])
    assert validated_data == [
        {
            'release_date': datetime.date(2020, 1, 10),
            'timestamp': datetime.datetime(2021, 5, 5, 0, 0),
            'title': 'valid_1'
        },
        {
            'release_date': datetime.date(2020, 1, 11),
            'timestamp': datetime.datetime(2021, 5, 5, 0, 0),
            'title': 'valid_2'
        },
        {'_errors': {"row": {
            "release_date": "2020-31-11",
            'timestamp': '2021-05-05 00:00:00',
            "title": "invalid_1"
        },
            "errors": {"release_date": ["Not a valid date."]}}},
        {'_errors': {"row": {
            "release_date": "2020-1-51",
            'timestamp': '2021-05-05 00:00:00',
            "title": "invalid_2"
        },
            "errors": {"release_date": ["Not a valid date."]}}}
    ]
def test_row_validator_with_duplicates():
    """With a UNIQUE constraint, rows whose precomputed ``__count__<col>``
    column is above 1 are rejected as '["duplicate row"]'; the helper column
    is stripped from valid rows but kept in the error payload."""
    input_data = [
        {"title": "title_1", "release_date": "2020-1-10", '__count__title': 1},
        {"title": "title_2", "release_date": "2020-1-11", '__count__title': 1},
        {"title": "title_2", "release_date": "2020-3-11", '__count__title': 2},
        {"title": "title_3", "release_date": "2020-1-51", '__count__title': 1},
    ]

    class TestSchema(Schema):
        UNIQUE = ["title"]
        title = fields.Str()
        release_date = fields.Date()

    validator = _RowValidator(TestSchema(), DEFAULT_ERRORS_COLUMN, TestSchema.UNIQUE)
    validated_data = [validator.validate_row(Row(**x)) for x in input_data]
    # Decode JSON error cells for structural comparison.
    for row in validated_data:
        if '_errors' in row:
            row['_errors'] = json.loads(row['_errors'])
    assert validated_data == [
        {'release_date': datetime.date(2020, 1, 10), 'title': 'title_1'},
        {'release_date': datetime.date(2020, 1, 11), 'title': 'title_2'},
        {'_errors': {"row": {"__count__title": 2, "release_date": "2020-3-11", "title": "title_2"},
                     "errors": ["duplicate row"]}},
        {'_errors': {"row": {"__count__title": 1, "release_date": "2020-1-51", "title": "title_3"},
                     "errors": {"release_date": ["Not a valid date."]}}}
    ]
| 2.828125 | 3 |
pong.py | paulone1/pong | 0 | 12766283 | import pygame, random
def ball_animation():
    """Advance the ball one frame: move, bounce off walls/paddles, and score.

    NOTE(review): ``left_player_score`` is incremented when the ball leaves
    the LEFT edge (and vice versa) -- i.e. each counter tracks goals conceded
    on that side, which are points for the *opposite* player.  The score
    blits in the main loop are crossed the same way, so the on-screen display
    is consistent; confirm the convention before renaming anything.
    """
    global ball_speed_x, ball_speed_y, left_player_score, right_player_score, score_time
    ball.x += ball_speed_x
    ball.y += ball_speed_y
    # Bounce off the top and bottom walls.
    if ball.top <= 0 or ball.bottom >= screen_height:
        ball_speed_y *= -1
    # Ball fully past the left edge: record the point and start the serve
    # countdown (score_time is read by the main loop, which calls ball_start).
    if ball.right <= 0:
        score_time = pygame.time.get_ticks()
        left_player_score += 1
    # Ball fully past the right edge.
    if ball.left >= screen_width:
        score_time = pygame.time.get_ticks()
        right_player_score += 1
    # Left paddle: reflect horizontally on a face hit, vertically on a
    # top/bottom edge hit.  The speed-sign guards prevent double bounces
    # while the ball overlaps the paddle.
    if ball.colliderect(left_player) and ball_speed_x < 0:
        if abs(ball.right - left_player.right) < 10:
            ball_speed_x *= -1
        elif abs(ball.bottom - left_player.top) < 10 and ball_speed_y > 0:
            ball_speed_y *= -1
        elif abs(ball.top - left_player.bottom) < 10 and ball_speed_y < 0:
            ball_speed_y *= -1
    # Right paddle, mirror logic.
    if ball.colliderect(right_player) and ball_speed_x > 0:
        if abs(ball.left - right_player.left) < 10:
            ball_speed_x *= -1
        elif abs(ball.bottom - right_player.top) < 10 and ball_speed_y > 0:
            ball_speed_y *= -1
        elif abs(ball.top - right_player.bottom) < 10 and ball_speed_y < 0:
            ball_speed_y *= -1
def player_animation():
    """Move both paddles according to the currently held keys.

    Right paddle: Up/Down arrows.  Left paddle: W/S.  Each move is clamped
    so the paddle never leaves the screen.
    """
    pressed = pygame.key.get_pressed()
    if pressed[pygame.K_DOWN] and right_player.bottom + PLAYER_VELOSITY < screen_height:
        right_player.y += PLAYER_VELOSITY
    if pressed[pygame.K_UP] and right_player.top - PLAYER_VELOSITY > 0:
        right_player.y -= PLAYER_VELOSITY
    if pressed[pygame.K_s] and left_player.bottom + PLAYER_VELOSITY < screen_height:
        left_player.y += PLAYER_VELOSITY
    if pressed[pygame.K_w] and left_player.top - PLAYER_VELOSITY > 0:
        left_player.y -= PLAYER_VELOSITY
def ball_start():
    """Re-centre the ball and run the 3-2-1 serve countdown.

    Called every frame while ``score_time`` is truthy.  For 2100 ms after a
    point the ball is frozen (speed 0,0) and a countdown digit is drawn;
    afterwards the ball is launched in a random diagonal direction and
    ``score_time`` is cleared so the main loop stops calling this.

    NOTE(review): on the very first frame ``score_time`` is the boolean True
    (see the setup section), so ``current_time - score_time`` is effectively
    the raw tick count -- the countdown only renders cleanly right after
    program start or a goal.
    """
    global ball_speed_x, ball_speed_y, ball_moving, score_time
    ball.center = (screen_width//2, screen_height//2)
    current_time = pygame.time.get_ticks()
    # 0-700 ms: "3", 700-1400 ms: "2", 1400-2100 ms: "1".
    if current_time - score_time < 700:
        number_three = basic_font.render("3",False,WHITE)
        screen.blit(number_three,(screen_width/2 - 10, screen_height/2 + 20))
    if 700 < current_time - score_time < 1400:
        number_two = basic_font.render("2",False,WHITE)
        screen.blit(number_two,(screen_width/2 - 10, screen_height/2 + 20))
    if 1400 < current_time - score_time < 2100:
        number_one = basic_font.render("1",False,WHITE)
        screen.blit(number_one,(screen_width/2 - 10, screen_height/2 + 20))
    if current_time - score_time < 2100:
        # Countdown still running: keep the ball frozen.
        ball_speed_y, ball_speed_x = 0,0
    else:
        # Countdown finished: serve in a random diagonal direction.
        ball_speed_x = 7 * random.choice((1,-1))
        ball_speed_y = 7 * random.choice((1,-1))
        score_time = None
def draw_winner(text):
    """Render *text* centred on screen, show it, and pause for 5 seconds."""
    surface = WINNER_FONT.render(text, 1, WHITE)
    centre_x = screen_width/2 - surface.get_width()/2
    centre_y = screen_height/2 - surface.get_height()/2
    screen.blit(surface, (centre_x, centre_y))
    pygame.display.update()
    pygame.time.delay(5000)
# General setup
pygame.mixer.pre_init(44100,-16,1, 1024)
pygame.init()
clock = pygame.time.Clock()

# Main Window
screen_width = 1280
screen_height = 960
screen = pygame.display.set_mode((screen_width,screen_height))
pygame.display.set_caption('Pong')

# Colors
WHITE = (255,255,255)

# Game Rectangles (ball centred; paddles vertically centred at each edge)
ball = pygame.Rect(screen_width // 2 - 10, screen_height // 2 - 10, 20, 20)
right_player = pygame.Rect(screen_width - 30, screen_height // 2 - 70, 20,100)
left_player = pygame.Rect(10, screen_height // 2 - 70, 20,100)

# Game Variables
ball_speed_x = 7 * random.choice((1,-1))
ball_speed_y = 7 * random.choice((1,-1))
FPS = 60
PLAYER_VELOSITY = 6  # paddle speed in pixels per frame (sic: "velocity")
ball_moving = False  # NOTE(review): assigned but never read in this file
# Truthy sentinel: makes the main loop call ball_start() on the very first
# frame; afterwards it holds a pygame tick count (or None while in play).
score_time = True

# Score Text
left_player_score = 0
right_player_score = 0
basic_font = pygame.font.SysFont('comicsans', 40)
WINNER_FONT = pygame.font.SysFont('comicsans', 100)
# Main game loop: events, physics, drawing, win check -- at FPS frames/sec.
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False

    # Game Logic
    ball_animation()
    player_animation()

    # Visuals
    screen.fill(0)
    pygame.draw.rect(screen, WHITE, left_player)
    pygame.draw.rect(screen, WHITE, right_player)
    pygame.draw.rect(screen, WHITE, ball)
    pygame.draw.line(screen, WHITE, (screen_width / 2, 0),(screen_width / 2, screen_height), 5)
    # score_time is truthy right after a goal (and on the first frame):
    # keep running the serve countdown until ball_start clears it.
    if score_time:
        ball_start()
    # left_player_score counts balls that passed the LEFT edge, i.e. points
    # won by the RIGHT player; it is therefore drawn on the right half
    # (and vice versa).
    left_player_text = basic_font.render(f'{left_player_score}',False,WHITE)
    screen.blit(left_player_text,(screen_width // 2 + 30, 10))
    right_player_text = basic_font.render(f'{right_player_score}',False,WHITE)
    screen.blit(right_player_text,(screen_width // 2 - 30, 10))

    winner_text = ""
    # BUG FIX: left_player_score increments when the ball exits the LEFT
    # edge, which is a point FOR the right player (see ball_animation), so
    # reaching 5 means the right player won.  The strings were swapped.
    if left_player_score == 5:
        winner_text = "Right Wins!"
    if right_player_score == 5:
        winner_text = "Left Wins!"
    if winner_text != "":
        draw_winner(winner_text)
        break

    pygame.display.flip()
    clock.tick(FPS)
pygame.quit()
| 3.34375 | 3 |
model.py | AbinavRavi/VAE-KL | 0 | 12766284 | <gh_stars>0
import numpy as np
import torch
import torch.nn as nn
import torch.distributions as dist
class Encoder(nn.Module):
    """Convolutional encoder: (B, input_size, H, W) -> (B, 256, H', W').

    Three stride-2 down-sampling convolutions followed by one stride-1
    projection; LeakyReLU after each of the first three convs, no
    activation on the final one.
    """

    def __init__(self, input_size):
        super(Encoder, self).__init__()
        self.conv1 = nn.Conv2d(input_size, 16, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv2 = nn.Conv2d(16, 64, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias=False)
        self.conv4 = nn.Conv2d(128, 256, kernel_size=4, stride=1, bias=False)
        self.lrelu = nn.LeakyReLU(inplace=True)

    def forward(self, x):
        h = x
        for down in (self.conv1, self.conv2, self.conv3):
            h = self.lrelu(down(h))
        return self.conv4(h)
class Decoder(nn.Module):
    """Transposed-convolution decoder: (B, z_dim, h, w) -> (B, 1, 16h, 16w).

    One stride-1 expansion followed by two stride-2 up-sampling layers;
    LeakyReLU after the first two, no activation on the output layer.
    """

    def __init__(self, z_dim):
        super(Decoder, self).__init__()
        self.deconv1 = nn.ConvTranspose2d(z_dim, 128, kernel_size=4, stride=1, bias=False)
        self.deconv2 = nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1, bias=False)
        self.deconv3 = nn.ConvTranspose2d(64, 1, kernel_size=4, stride=2, padding=1, bias=False)
        self.lrelu = nn.LeakyReLU()

    def forward(self, x):
        h = self.lrelu(self.deconv1(x))
        h = self.lrelu(self.deconv2(h))
        return self.deconv3(h)
class VAE(nn.Module):
    """Variational autoencoder built from :class:`Encoder`/:class:`Decoder`.

    The encoder emits 256 channels which are split in half into
    ``(mu, log_std)``, so the latent fed to the decoder always has 128
    channels; ``z_dim`` is kept in the signature for interface
    compatibility but does not change that split.

    Fixes over the previous revision: ``input_size`` was ignored
    (hard-coded to 1), the decoder was built with 256 input channels even
    though the latent has 128, and ``forward`` printed debug output and
    returned ``None`` because sampling/decoding was commented out.
    """

    def __init__(self, input_size, z_dim):
        super(VAE, self).__init__()
        self.encoder = Encoder(input_size=input_size)
        # Latent has half the encoder's 256 output channels (the other
        # half parameterises the log std), hence 128 decoder input channels.
        self.decoder = Decoder(z_dim=128)

    def forward(self, input):
        """Encode, sample via the reparameterization trick, and decode.

        Returns ``(x_rec, mu, std)`` for use in a reconstruction + KL loss.
        """
        enc = self.encoder(input)
        mu, log_std = torch.chunk(enc, 2, dim=1)
        std = torch.exp(log_std)
        z_dist = dist.Normal(mu, std)
        # rsample keeps gradients flowing through the sampling step.
        z = z_dist.rsample()
        x_rec = self.decoder(z)
        return x_rec, mu, std
| 2.375 | 2 |
aggregate.py | Blosc/bloscpack-benchmarking | 1 | 12766285 | #!/usr/bin/env python
""" Award points for position. """
import benchmark_analysis_utils as bau
import pandas as pd
import sys
def aggregate(df, ratio=False):
    """Award rank points to every codec for each benchmark combination.

    For every (size, storage, complexity) group the codecs are ranked per
    metric -- descending for the timing metrics and ascending for ``ratio``
    (same directions as the original implementation) -- and the rank
    (1 = first) is added to the codec's running total.

    :param df: results frame whose MultiIndex starts with the levels
        size, storage, complexity; the remaining levels (codec, ...) identify
        a codec configuration, with ``df.index.levels[-2]`` being the codec.
    :param ratio: also rank the compression ratio; the ``tables`` and
        ``npy`` baselines are excluded from every ranking in that case.
    :return: dict mapping ``"_"``-joined codec index labels to their score.
    """
    metrics = ['compress', 'decompress', 'dc_no_cache']
    if ratio:
        metrics.append('ratio')
    results = {}
    for size in ('small', 'mid', 'large'):
        for storage in ('ephemeral', 'esb'):
            for complexity in ('arange', 'linspace', 'poisson', 'neuronal', 'bitcoin'):
                group = df.loc[(size, storage, complexity)]
                for metric in metrics:
                    series = group[metric]
                    if ratio:
                        # Drop baselines without a meaningful ratio from
                        # *every* metric so all rankings cover the same set.
                        codecs = sorted(set(df.index.levels[-2]) - {'tables', 'npy'})
                        series = series.loc[codecs]
                    # Sort AFTER filtering: the old code applied ``.loc``
                    # after sorting, which reordered rows by label and
                    # silently destroyed the ranking when ratio=True.
                    # (sort_values/items replace the removed pandas
                    # Series.sort/Series.iteritems APIs.)
                    series = series.sort_values(ascending=(metric == 'ratio'))
                    for rank, (index, _score) in enumerate(series.items(), start=1):
                        codec = "_".join(map(str, index))
                        results[codec] = results.get(codec, 0) + rank
    return results
# Build the per-codec score table twice -- once including the compression
# ratio in the ranking, once without -- and write each to CSV.  The results
# file only needs to be loaded once: aggregate() does not mutate it.
# (sort_index/sort_values replace the removed pandas .sort API.)
df = bau.load_results_file(sys.argv[1]).sort_index()
for with_ratio, out_name in ((True, 'aggregate_with_ratio.csv'),
                             (False, 'aggregate_without_ratio.csv')):
    df_results = pd.DataFrame.from_dict(
        aggregate(df, ratio=with_ratio), orient='index').sort_values(0)
    df_results.index.names = ('codec',)
    df_results.columns = ('score',)
    df_results.to_csv(out_name)
| 2.578125 | 3 |
server_nodes/udp_bridge/standalone/udp_bridge_node.py | Rooholla-KhorramBakht/RaspiTrack | 0 | 12766286 | <filename>server_nodes/udp_bridge/standalone/udp_bridge_node.py<gh_stars>0
import numpy as np
import yaml
import pickle
from src.telemetry_utils import *
import argparse
import yaml
import os
import time
import signal
import sys
import threading
class recorderClass():
    """Accumulates received telemetry per UDP port; can pickle it to disk."""

    def __init__(self, path, ports_list):
        self.path = path
        self.ports_list = ports_list
        # One independent list per port (do not use dict.fromkeys here:
        # the per-port lists must not be shared).
        self.data = {port: [] for port in ports_list}

    def update(self, port, data):
        """Append one received sample to the history of *port*."""
        self.data[port].append(data)

    def record_to_file(self):
        """Pickle everything recorded so far to a timestamped file in ``path``."""
        stamp = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
        target = os.path.join(self.path, f'recorded-{stamp}.pckl')
        with open(target, 'wb') as f:
            pickle.dump(self.data, f)
def callback(stamp, port, markers_in_frames):
    """Handle one decoded telemetry packet from a camera node.

    Optionally echoes the per-frame marker counts and, when recording is
    enabled in the global config, stores the payload under its source port.
    """
    if configs['print_markers']:
        marker_counts = [frame.shape[0] for frame in markers_in_frames]
        print(port, stamp, marker_counts)
    if configs['record']:
        recorder.update(port, [stamp, markers_in_frames])
def termination_handling(signum, frame):
    """SIGINT handler: stop every RX loop, flush the recording, and exit."""
    print('\nTerminating')
    for telemetry in telemetry_objects:
        telemetry.RX_RUNNING = False
    if configs['record']:
        recorder.record_to_file()
    sys.exit()
if __name__ == '__main__':
    # Fix: "prgram" -> "program" in the user-visible help text.
    parser = argparse.ArgumentParser(
        description='A program to capture the marker data from multiple publisher nodes over UDP.')
    parser.add_argument('config_file', type=str, help='The path to the config yaml file')
    args = parser.parse_args()

    with open(args.config_file, 'r') as f:
        configs = yaml.safe_load(f)

    # One telemetry receiver (and one RX thread) per configured camera port.
    telemetry_objects = []
    rx_threads = []
    for port in configs['camera_ports']:
        obj = udp_telemetry()
        obj.INPUT_IP = configs['local_ip']
        obj.INPUT_PORT = port
        telemetry_objects.append(obj)
        rx_threads.append(threading.Thread(target=obj.start_rx, args=(callback,)))

    # Install the SIGINT handler before starting the threads so Ctrl-C
    # always shuts the receivers down (and flushes the recording).
    signal.signal(signal.SIGINT, termination_handling)
    if configs['record']:
        recorder = recorderClass(configs['path'], configs['camera_ports'])

    for thread in rx_threads:
        thread.start()
| 2.390625 | 2 |
my_settings/settings.py | GregEremeev/my_settings | 1 | 12766287 | <reponame>GregEremeev/my_settings
import os
import sys
from collections.abc import Mapping
from inspect import ismodule, getfile
class ReadOnlyDict(Mapping):
    """Immutable :class:`Mapping` view over an existing dict.

    Only a reference is kept; :class:`Mapping` contributes no mutators, so
    the view is read-only as long as callers don't touch the backing dict.
    """

    def __init__(self, dict_):
        self._data = dict_

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        yield from self._data

    def __len__(self):
        return len(self._data)
class Settings:
    """Lazy, layered settings loader.

    Values are read on first attribute access from up to three sources, in
    order: ``primary_settings``, optional ``custom_settings`` overrides and,
    when the process was launched via the ``py.test`` script, optional
    ``test_settings``.  Each source is either an imported module, a file
    path, or the name of an environment variable holding a file path; later
    sources override earlier keys.  The merged result is exposed read-only.
    """

    __slots__ = ('_data', 'primary_settings', 'custom_settings', 'test_settings')

    PYTEST_SCRIPT_NAME = 'py.test'

    def __init__(self, primary_settings, custom_settings=None, test_settings=None):
        self._data = {}
        self.primary_settings = primary_settings
        self.custom_settings = custom_settings
        self.test_settings = test_settings

    def __getattr__(self, name):
        # Lazy load: sources are read only on the first lookup.
        if not self._data:
            self._read_settings_sources()
        if name in self._data:
            return self._data[name]
        else:
            raise AttributeError("{} attribute doesn't exist".format(name))

    def _read_settings_sources(self):
        """Load every configured source; later loads override earlier keys."""
        self._load_settings(self.primary_settings)
        if self.custom_settings:
            self._load_settings(self.custom_settings)
        if self.test_settings and self._is_test_running():
            self._load_settings(self.test_settings)

    def _is_test_running(self):
        """Heuristic: True when the process was started by the py.test script."""
        return self.PYTEST_SCRIPT_NAME == os.path.basename(sys.argv[0])

    def _load_settings_from_file(self, file_path):
        """Execute *file_path* as Python and merge its names into the data.

        NOTE: ``exec`` runs arbitrary code -- settings files must be trusted.
        """
        data = {**self._data}
        if os.path.isfile(file_path):
            # ``with`` closes the handle deterministically; the previous
            # ``exec(open(...).read())`` leaked it until garbage collection.
            with open(file_path) as settings_file:
                exec(settings_file.read(), {}, data)
            self._data = ReadOnlyDict(data)
        else:
            raise FileNotFoundError("File {} doesn't exist".format(file_path))

    def _load_settings(self, settings):
        """Dispatch on the source kind: module, env-var name, or file path."""
        if ismodule(settings):
            self._load_settings_from_file(getfile(settings))
        elif isinstance(settings, str):
            if settings in os.environ:
                file_path = os.environ.get(settings)
            else:
                file_path = settings
            self._load_settings_from_file(file_path)
        else:
            raise TypeError('Settings must be module, file path or env variable')
| 2.28125 | 2 |
torchcontrol/plotting/__init__.py | DiffEqML/torchcontrol | 12 | 12766288 | from .cstr import *
from .quadcopter import *
| 1.1875 | 1 |
monkq/assets/account.py | zsluedem/MonkTrader | 2 | 12766289 | <gh_stars>1-10
#
# MIT License
#
# Copyright (c) 2018 WillQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, List, Type
from monkq.assets.const import DIRECTION, POSITION_EFFECT, SIDE
from monkq.assets.instrument import FutureInstrument
from monkq.assets.order import FutureLimitOrder
from monkq.assets.positions import (
BasePosition, FuturePosition, PositionManager,
)
from monkq.assets.trade import Trade
from monkq.exchange.base import BaseSimExchange
@dataclass()
class APIKey():
    """Credential pair for a trading-exchange API."""
    # Field order (secret first) matters for positional construction.
    api_secret: str
    api_key: str
@dataclass()
class BaseAccount():
    """Common state of a simulated trading account.

    Holds the owning exchange, the per-instrument position map and the cash
    wallet balance.  Subclasses must implement :meth:`deal`.
    """
    exchange: BaseSimExchange
    # Concrete position class used by the PositionManager for new entries.
    position_cls: Type[BasePosition]
    # Built in __post_init__, hence excluded from the generated __init__.
    positions: PositionManager = field(init=False)
    wallet_balance: float = 0

    def __post_init__(self) -> None:
        self.positions = PositionManager(self.position_cls, self)

    def deal(self, trade: Trade) -> None:
        """Apply *trade* to this account; must be overridden by subclasses."""
        raise NotImplementedError()

    @property
    def total_capital(self) -> float:
        """Account capital; the base definition is simply the wallet balance."""
        return self.wallet_balance

    def __getstate__(self) -> dict:
        """Pickle support: flatten exchange metadata and the position map."""
        return {
            "exchange": self.exchange,
            "exchange_name": self.exchange.name,
            "exchange_settings": self.exchange.exchange_setting,
            "position_cls": self.position_cls,
            "positions": list(self.positions.items()),
            "wallet_balance": self.wallet_balance
        }

    def __setstate__(self, state: dict) -> None:
        """Pickle support: restore the state captured by __getstate__.

        NOTE(review): ``positions`` is rebuilt here as a plain dict rather
        than a ``PositionManager``, and ``position_cls`` is saved by
        __getstate__ but never restored -- confirm unpickled accounts never
        rely on either before changing this.
        """
        self.exchange = state['exchange']  # type:ignore
        self.exchange.name = state['exchange_name']
        self.exchange.exchange_setting = state['exchange_settings']
        self.wallet_balance = state['wallet_balance']
        self.positions = {}  # type:ignore
        for key, value in state['positions']:
            self.positions[key] = value
@dataclass()
class RealFutureAccount(BaseAccount):
    """Future account carrying real exchange API credentials.

    ``api_key`` previously used a single class-level ``APIKey('', '')``
    instance as its default, which every account instance would alias (and
    which Python 3.11+ rejects outright, since a dataclass instance with
    ``eq=True`` is unhashable); a ``default_factory`` gives each account
    its own fresh credentials object instead.
    """
    api_key: APIKey = field(default_factory=lambda: APIKey('', ''))
@dataclass()
class FutureAccount(BaseAccount):
    """Simulated futures account: margin accounting and trade settlement."""

    position_cls: Type[BasePosition] = FuturePosition

    @property
    def position_margin(self) -> float:
        # Total margin currently locked by open positions.
        return sum([position.position_margin for instrument, position in self.positions.items()])

    @property
    def order_margin(self) -> float:
        """
        Total margin reserved by open limit orders, computed per instrument.
        :return: sum of per-instrument order margins
        """
        d: Dict[FutureInstrument, List[FutureLimitOrder]] = defaultdict(list)
        for order in self.exchange.get_open_orders(self):
            if isinstance(order, FutureLimitOrder):
                d[order.instrument].append(order)
        return sum([self._order_margin(instrument, orders) for instrument, orders in d.items()])

    def _order_margin(self, instrument: FutureInstrument, orders: List[FutureLimitOrder]) -> float:
        """
        Margin required by the open limit orders of one instrument.

        Orders on the opposite side of the current position offset it (up to
        the position quantity) and therefore need no extra margin.
        :param instrument: the instrument the orders belong to
        :param orders: open limit orders for that instrument
        :return: required order margin
        """
        init_rate: float
        position = self.positions[instrument]
        if position.isolated:
            # Isolated margin: initial rate derives from the position leverage.
            init_rate = 1 / position.leverage
        else:
            init_rate = instrument.init_margin_rate
        long_value: float = 0.
        short_value: float = 0.
        opposite_orders: List[FutureLimitOrder] = []
        for order in orders:
            if order.side == SIDE.BUY:
                long_value += order.remain_value
            else:
                short_value += order.remain_value
            if order.direction != position.direction:
                opposite_orders.append(order)
        # Offset opposite-side orders in ascending price order.
        opposite_orders = sorted(opposite_orders, key=lambda x: x.price)
        quantity: float = 0
        opposite_offset_value: float = 0
        # Accumulate the value of opposite orders that merely close the
        # existing position, capped at the position quantity.
        # NOTE(review): `quantity` only ever decreases below, so for a LONG
        # position with positive quantity the first branch condition looks
        # permanently true -- confirm the intended sign conventions.
        for order in opposite_orders:
            if position.direction == DIRECTION.LONG:
                if quantity - order.remain_quantity < position.quantity:
                    opposite_offset_value += order.remain_value
                    quantity -= order.remain_quantity
                else:
                    valid_quantity = position.quantity - quantity
                    opposite_offset_value += valid_quantity * order.price
                    break
            else:
                if quantity - order.remain_quantity > position.quantity:
                    opposite_offset_value += order.remain_value
                    quantity -= order.remain_quantity
                else:
                    valid_quantity = quantity - position.quantity
                    opposite_offset_value += valid_quantity * order.price
                    break
        if position.direction == DIRECTION.LONG:
            short_value -= opposite_offset_value
        else:
            long_value -= opposite_offset_value
        # Margin is charged on the larger remaining side, plus taker fees
        # counted both in and out.
        valid_value: float = max(short_value, long_value)
        return valid_value * (init_rate + 2 * instrument.taker_fee)

    @property
    def unrealised_pnl(self) -> float:
        # Unrealised profit/loss across all open positions.
        return sum([position.unrealised_pnl for instrument, position in self.positions.items()])

    @property
    def margin_balance(self) -> float:
        # Cash balance marked to market.
        return self.wallet_balance + self.unrealised_pnl

    @property
    def total_capital(self) -> float:
        return self.margin_balance

    @property
    def available_balance(self) -> float:
        # Capital left after margin locked by orders and positions.
        return self.margin_balance - self.order_margin - self.position_margin

    def deal(self, trade: Trade) -> None:
        """Settle an executed trade: realise pnl on closes, then delegate
        quantity bookkeeping to the position object."""
        position = self.positions[trade.instrument]
        position_effect = position.position_effect(trade)
        if position_effect == POSITION_EFFECT.OPEN or position_effect == POSITION_EFFECT.GET_MORE:
            # Opening or adding to a position realises no profit.
            pass
        else:
            if position_effect == POSITION_EFFECT.CLOSE or position_effect == POSITION_EFFECT.CLOSE_PART:
                profit_quantity = abs(trade.exec_quantity)
            else:
                # Direction flip: only the old position quantity is closed.
                profit_quantity = abs(position.quantity)
            if position.direction == DIRECTION.LONG:
                profit = trade.exec_price * profit_quantity - position.open_price * profit_quantity
            else:
                profit = position.open_price * profit_quantity - trade.exec_price * profit_quantity
            self.wallet_balance += profit
            # NOTE(review): commission is only deducted on closing trades here;
            # confirm opening trades are meant to be commission-free.
            self.wallet_balance -= trade.commission
        position.deal(trade)
| 1.859375 | 2 |
review_model.py | santiagodiaz1993/review_score_predictor | 0 | 12766290 | import os
import pandas as pd
# Load the first JSON review dump found in the working directory and preview it.
for file in os.listdir():
    if file.endswith("json"):
        # BUGFIX: read the file that was actually found instead of a
        # hard-coded filename.
        reviews_data = pd.read_json(file)
        print(reviews_data.head())
        break
| 2.6875 | 3 |
model.py | markoklemensek/Kosarka-turnir | 0 | 12766291 | import sqlite3
import pomozne_fun

# Module-wide connection to the tournament database, shared by every query below.
conn = sqlite3.connect("kosarka_turnir")
def najboljsi_na_turnirju():
    """Return 'ime priimek' of the player with the most points scored in the
    tournament; ties are broken alphabetically (first name, then surname)."""
    sql = '''
        SELECT
            ime,
            priimek
        FROM (
            SELECT igralec,
                sum(točke) AS točke,
                sum(skoki) AS skoki,
                sum(podaje) AS podaje
            FROM statistika
            GROUP BY igralec
        )
        JOIN
        igralec ON igralec = igralec.id
        -- alphabetical tie-break so the result matches the documented contract
        ORDER BY točke DESC, ime ASC, priimek ASC;
        '''
    ime, priimek = conn.execute(sql).fetchone()
    return '{0} {1}'.format(ime, priimek)
def najboljsi_podajalec():
    """Return 'ime priimek' of the player with the most assists (podaje) in
    the tournament; ties are broken alphabetically (first name, surname).

    BUGFIX: the old docstring was copy-pasted from the points query and
    wrongly claimed the function ranked by points.
    """
    sql = '''
        SELECT ime,
            priimek
        FROM (
            SELECT igralec,
                sum(točke) AS točke,
                sum(skoki) AS skoki,
                sum(podaje) AS podaje
            FROM statistika
            GROUP BY igralec
        )
        JOIN
        igralec ON igralec = igralec.id
        -- alphabetical tie-break so the result matches the documented contract
        ORDER BY podaje DESC, ime ASC, priimek ASC;
        '''
    ime, priimek = conn.execute(sql).fetchone()
    return '{0} {1}'.format(ime, priimek)
def najboljsi_skakalec():
    """Return 'ime priimek' of the player with the most rebounds (skoki) in
    the tournament; ties are broken alphabetically (first name, surname).

    BUGFIX: the old docstring was copy-pasted from the points query and
    wrongly claimed the function ranked by points.
    """
    sql = '''
        SELECT ime,
            priimek
        FROM (
            SELECT igralec,
                sum(točke) AS točke,
                sum(skoki) AS skoki,
                sum(podaje) AS podaje
            FROM statistika
            GROUP BY igralec
        )
        JOIN
        igralec ON igralec = igralec.id
        -- alphabetical tie-break so the result matches the documented contract
        ORDER BY skoki DESC, ime ASC, priimek ASC;
        '''
    ime, priimek = conn.execute(sql).fetchone()
    return '{0} {1}'.format(ime, priimek)
def rezultati_tekem():
    """Return a dict mapping each match (the team-name pair returned by
    pomozne_fun.ekipi_from_tekma) to [home_points, away_points]."""
    team_points_sql = '''
        SELECT SUM(statistika.točke)
        FROM tekma
        JOIN
        statistika ON tekma.id = statistika.tekma
        JOIN
        igralec ON statistika.igralec = igralec.id
        WHERE tekma.id = ? AND
            igralec.ekipa = ?;
        '''
    rezultati = {}
    # Iterate over the matches actually stored in the database instead of a
    # hard-coded count (the old code assumed exactly 28 matches).
    for match_id, domači, gosti in conn.execute(
            'SELECT id, domači, gosti FROM tekma;').fetchall():
        score = []
        for ekipa_id in (domači, gosti):
            # One aggregate query per team: total points its players scored
            # in this match.
            (points,) = conn.execute(team_points_sql, [match_id, ekipa_id]).fetchone()
            score.append(points)
        rezultati[pomozne_fun.ekipi_from_tekma(match_id)] = score
    return rezultati
def lestvica_rezultatov():
    """Return a dict mapping team name -> tournament points (3 for a win,
    1 for a draw, 0 for a loss), ordered from best to worst."""
    team_points_sql = '''
        SELECT SUM(statistika.točke)
        FROM tekma
        JOIN
        statistika ON tekma.id = statistika.tekma
        JOIN
        igralec ON statistika.igralec = igralec.id
        WHERE tekma.id = ? AND
            igralec.ekipa = ?;
        '''
    # Start every team at 0 points, keyed by its real database id (the old
    # code assumed ids were the contiguous range 1..n).
    tocke = {ekipa_id: 0 for (ekipa_id,) in conn.execute('SELECT id FROM ekipa;')}
    # Walk the matches stored in the database (no hard-coded match count).
    for match_id, domači, gosti in conn.execute(
            'SELECT id, domači, gosti FROM tekma;').fetchall():
        (domači_rez,) = conn.execute(team_points_sql, [match_id, domači]).fetchone()
        (gosti_rez,) = conn.execute(team_points_sql, [match_id, gosti]).fetchone()
        if domači_rez > gosti_rez:
            tocke[domači] += 3
        elif domači_rez == gosti_rez:
            tocke[domači] += 1
            tocke[gosti] += 1
        else:
            tocke[gosti] += 3
    # BUGFIX: the previous selection-sort zeroed entries in place, which could
    # re-emit an already ranked team (overwriting its score with 0) and skip a
    # genuine 0-point team. A stable descending sort keeps tie order intact.
    return {pomozne_fun.ekipa(ekipa_id): pts
            for ekipa_id, pts in sorted(tocke.items(), key=lambda kv: kv[1],
                                        reverse=True)}
def povprečja():
    """Return, for every player: first name, surname, team name, and the
    player's per-game averages of points, rebounds and assists (rounded to
    whole numbers)."""
    sql = '''
        SELECT igralec.ime,
            priimek,
            ekipa.ime,
            round(avg(točke), 0),
            round(avg(skoki), 0),
            round(avg(podaje), 0)
        FROM (
            statistika
            JOIN
            igralec ON igralec.id = statistika.igralec
        )
        JOIN
        ekipa ON igralec.ekipa = ekipa.id
        GROUP BY igralec;
        '''
    return conn.execute(sql).fetchall()
def seznam_ekip():
    """Return the list of team names."""
    sql = '''
        SELECT ime
        FROM ekipa;
        '''
    # Each row is a 1-tuple; unpack the name directly.
    return [ime for (ime,) in conn.execute(sql)]
def seznam_igralcev(ekipa1, ekipa2):
    """Return 'ime_priimek_gosti_domači' strings for every player who appeared
    in the match between the teams named ekipa1 and ekipa2 (either venue)."""
    sql_id = '''
        SELECT id
        FROM ekipa
        WHERE ime = ?;
        '''
    sql_igralci = '''
        SELECT igralec.ime,
            igralec.priimek,
            tekma.gosti,
            tekma.domači
        FROM tekma
        JOIN
        statistika ON tekma.id = statistika.tekma
        JOIN
        igralec ON statistika.igralec = igralec.id
        WHERE (domači = ? AND
                gosti = ?) OR
            (domači = ? AND
                gosti = ?);
        '''
    # Resolve the team names to database ids.
    id1 = conn.execute(sql_id, [ekipa1]).fetchone()[0]
    id2 = conn.execute(sql_id, [ekipa2]).fetchone()[0]
    return ['{0}_{1}_{2}_{3}'.format(ime, priimek, gosti, domači)
            for ime, priimek, gosti, domači
            in conn.execute(sql_igralci, [id1, id2, id2, id1])]
def spremeni_ime_ekipe(ime, novo_ime):
    """Rename the team called `ime` to `novo_ime` and commit the change."""
    sql = '''
        UPDATE ekipa
        SET ime = ?
        WHERE ime = ?
        '''
    # Using the connection as a context manager wraps the UPDATE in a
    # transaction (commit on success, rollback on error). The old hand-written
    # BEGIN/COMMIT statements could collide with sqlite3's own implicit
    # transaction handling ("cannot start a transaction within a transaction").
    with conn:
        conn.execute(sql, [novo_ime, ime])
    return None
def spremeni_statistiko_igralca(ime, priimek, id1, id2, tocke, podaje, skoki):
    """Overwrite the recorded points/assists/rebounds of the given player for
    the match played between the teams with ids id1 and id2 (either venue)."""
    sql_tekma = '''
        SELECT id
        FROM tekma
        WHERE (domači = ? AND
                gosti = ?) OR
            (domači = ? AND
                gosti = ?);
        '''
    id_tekme = conn.execute(sql_tekma, [id1, id2, id2, id1]).fetchone()[0]
    sql_igralec = '''
        SELECT id
        FROM igralec
        WHERE ime = ? AND
            priimek = ?;
        '''
    id_igralca = conn.execute(sql_igralec, [ime, priimek]).fetchone()[0]
    sql_update = '''
        UPDATE statistika
        SET točke = ?,
            podaje = ?,
            skoki = ?
        WHERE igralec = ? AND
            tekma = ?;
        '''
    # Context-manager transaction (commit/rollback) instead of hand-written
    # BEGIN/COMMIT, which can conflict with sqlite3's implicit transactions.
    with conn:
        conn.execute(sql_update, [tocke, podaje, skoki, id_igralca, id_tekme])
    return None
def rezultat_igralca_na_tekmi(ime, priimek, id1, id2):
    """Return [ime, priimek, points, assists, rebounds, team1, team2] for the
    given player's performance in the match between the teams with ids id1 and
    id2 (either venue); returns [] when no matching statistic exists."""
    sql = '''
        SELECT igralec.ime,
            igralec.priimek,
            statistika.točke,
            statistika.podaje,
            statistika.skoki
        FROM tekma
        JOIN
        statistika ON tekma.id = statistika.tekma
        JOIN
        igralec ON statistika.igralec = igralec.id
        WHERE igralec.ime = ? AND
            igralec.priimek = ? AND
            ( (tekma.domači = ? AND
                tekma.gosti = ?) OR
            (tekma.domači = ? AND
                tekma.gosti = ?) );
        '''
    podatki = []  # elements: ime, priimek, točke, podaje, skoki, ekipa1, ekipa2
    # NOTE: the loop variables shadow the `ime`/`priimek` parameters; the query
    # has already consumed them by this point, so behaviour is unaffected.
    for ime, priimek, točke, podaje, skoki in conn.execute(sql, [ime, priimek, id1, id2, id2, id1]):
        ekipa1 = pomozne_fun.ekipa(id1)
        ekipa2 = pomozne_fun.ekipa(id2)
        podatki = [ime, priimek, točke, podaje, skoki, ekipa1, ekipa2]
    return podatki
# Reclaim free pages in the database file when the module is imported.
conn.execute('VACUUM')
| 3 | 3 |
mpfmc/tests/test_SlidePlayer.py | muffler-aus/mpf-mc | 0 | 12766292 | import weakref
import gc
from kivy.uix.screenmanager import WipeTransition, FadeTransition
from mpfmc.config_players.slide_player import McSlidePlayer
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
from mpfmc.transitions.move_in import MoveInTransition
from mpf.tests.MpfTestCase import MpfTestCase
import mpfmc.core
import os
class TestSlidePlayer(MpfMcTestCase):
    """MC-side tests for the slide player: showing, replacing, expiring and
    removing slides, transitions, priorities, and slide lifecycle events."""

    def get_machine_path(self):
        # Test machine config lives under the mpfmc test tree.
        return 'tests/machine_files/slide_player'

    def get_config_file(self):
        return 'test_slide_player.yaml'

    def test_slide_on_default_display(self):
        """A slide shows on the default display and can be replaced at the
        same priority."""
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # now replace that slide at the same priority and make sure it works
        self.mc.events.post('show_slide_4')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_4')

    def test_slide_on_default_display_hardcoded(self):
        """A slide shows when the display is hard-coded in the config."""
        self.mc.events.post('show_slide_2')
        self.advance_time()
        self.assertEqual(self.mc.displays['display1'].current_slide_name,
                         'machine_slide_2')

    def test_animation(self):
        """An animated slide shows, is removed, and is garbage collected."""
        self.mc.events.post("show_slide_with_animations")
        self.advance_time()
        self.assertEqual(self.mc.displays['display1'].current_slide_name,
                         'my_slide')
        slide = weakref.ref(self.mc.targets['display1'].current_slide)
        self.assertTrue(slide())
        self.mc.events.post("remove_slide_with_animations")
        self.advance_time()
        self.assertEqual(self.mc.displays['display1'].current_slide_name,
                         'display1_blank')
        self.mc.events.post('show_slide_1')
        self.advance_time()
        gc.collect()
        self.advance_time()
        # the removed slide must not be kept alive by animation references
        self.assertFalse(slide())

    def test_slide_on_second_display(self):
        """Slides can target a non-default display."""
        self.mc.events.post('show_slide_3')
        self.advance_time()
        self.assertEqual(self.mc.displays['display2'].current_slide_name,
                         'machine_slide_3')

    def test_priority_from_slide_player(self):
        """The priority configured in the slide_player section is applied."""
        self.mc.events.post('show_slide_4_p200')
        self.advance_time()
        self.assertEqual(self.mc.displays['display1'].current_slide_name,
                         'machine_slide_4')
        self.assertEqual(self.mc.displays['display1'].current_slide.priority,
                         200)

    def test_force_slide(self):
        """A forced slide replaces a higher-priority one."""
        self.mc.events.post('show_slide_4_p200')
        self.advance_time()
        self.assertEqual(self.mc.displays['display1'].current_slide_name,
                         'machine_slide_4')
        self.assertEqual(self.mc.displays['display1'].current_slide.priority,
                         200)
        self.mc.events.post('show_slide_1_force')
        self.advance_time()
        self.assertEqual(self.mc.displays['display1'].current_slide_name,
                         'machine_slide_1')
        self.assertEqual(self.mc.displays['display1'].current_slide.priority,
                         0)

    def test_dont_show_slide(self):
        """A slide created with show=False does not become current."""
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')
        self.assertEqual(self.mc.displays['display1'].current_slide.priority,
                         0)

        # request a higher priority slide, but don't show it
        self.mc.events.post('show_slide_5_dont_show')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')
        self.assertEqual(self.mc.displays['display1'].current_slide.priority,
                         0)

    def test_mode_slide_player(self):
        """Mode slides only show while the mode runs, respect priorities, and
        are removed (and collected) when the mode stops."""
        # set a baseline slide
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # post the slide_player event from the mode. Should not show the slide
        # since the mode is not running
        self.mc.events.post('show_mode1_slide')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # start the mode and then post that event again. The slide should
        # switch
        self.mc.modes['mode1'].start()
        self.mc.events.post('show_mode1_slide')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'mode1_slide')
        slide = weakref.ref(self.mc.targets['display1'].current_slide)
        self.assertTrue(slide())

        # stop the mode and make sure the slide is removed
        num_slides = len(self.mc.targets['display1'].slides)
        self.mc.modes['mode1'].stop()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')
        self.assertEqual(len(self.mc.targets['display1'].slides),
                         num_slides - 1)
        gc.collect()
        self.assertFalse(slide())

        # post the slide_player event from the mode. Should not show the slide
        # since the mode is not running
        self.mc.events.post('show_mode1_slide')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # show a priority 200 slide from the machine config
        self.mc.events.post('show_slide_4_p200')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_4')
        self.assertEqual(self.mc.targets['display1'].current_slide.priority,
                         200)

        # start the mode again (priority 500)
        self.mc.modes['mode1'].start()

        # show a slide, but priority 150 which means the slide will not be
        # shown
        self.mc.events.post('show_mode1_slide_2')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_4')
        self.assertEqual(self.mc.targets['display1'].current_slide.priority,
                         200)

        # now kill the current slide and the mode slide should show
        self.mc.targets['display1'].remove_slide('machine_slide_4')
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'mode1_slide_2')
        self.assertEqual(self.mc.targets['display1'].current_slide.priority,
                         150)

    def test_from_show_via_bcp(self):
        """A slides_play trigger arriving over BCP plays the slide."""
        from mpf.core.bcp.bcp_socket_client import encode_command_string

        show_slide_section = dict()
        show_slide_section['widgets'] = list()
        show_slide_section['widgets'].append(dict(
            type='text', text='TEST FROM SHOW'))
        player = McSlidePlayer(self.mc)
        show_slide_section = player._validate_config_item('slide1', show_slide_section)
        bcp_string = encode_command_string('trigger', name='slides_play', context='test_context', priority=1,
                                           settings=show_slide_section)
        self.mc.bcp_processor.receive_bcp_message(bcp_string)
        self.advance_time()

    def test_slides_created_in_slide_player(self):
        """Anonymous slides defined inline in slide_player configs work and
        are garbage collected when replaced."""
        # Anon slides are where the widgets are listed in the slide_player
        # section of a config file or the slides section of a show
        self.mc.events.post('anon_slide_dict')
        self.advance_time()
        self.mc.events.post('anon_slide_list')
        self.advance_time()
        self.mc.events.post('anon_slide_widgets')
        self.advance_time()
        slide = weakref.ref(self.mc.targets['display1'].current_slide)
        self.assertTrue(slide())
        self.mc.events.post('anon_slide_widgets2')
        self.advance_time(1)
        slide2 = weakref.ref(self.mc.targets['display1'].current_slide)
        gc.collect()
        self.assertFalse(slide())
        self.assertTrue(slide2())

    def test_expire_in_slide(self):
        # tests that slide expire time works when configured in a slide
        self.mc.events.post('base_slide_no_expire')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_6')
        self.mc.events.post('show_slide_7')  # expire 1s
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_7')
        self.advance_time(1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_6')

    def test_expire_in_slide_player(self):
        # tests that expire time works when configured in the slide player
        self.mc.events.post('base_slide_no_expire')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_6')
        self.mc.events.post('new_slide_expire')  # expire 1s
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')
        self.advance_time(1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_6')

    def test_expire_with_transition_out_in_slide(self):
        # Tests a slide expiring where the expiring slide has a transition
        self.mc.events.post('base_slide_no_expire')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_6')

        # show a slide which expires in 1 sec, and has a transition out set
        self.mc.events.post('show_slide_8')
        self.advance_time(.1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_8')

        # advance to after this slide_8 expired, transition should be in effect
        self.advance_time(1)
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   WipeTransition))

        # advance to transition done, should be back to the original slide
        self.advance_time(1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_6')

    def test_current_slide_transition_out(self):
        # Tests a new slide with no transition, but the current slide has one,
        # so it uses that

        # show a slide, no expire, but with transition out
        self.mc.events.post('show_slide_9')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_9')

        # show a new slide with no transition
        self.assertIsNone(self.mc.slides['machine_slide_6']['transition'])
        self.mc.events.post('machine_slide_6')
        self.advance_time()

        # transition from first slide should be happening
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   MoveInTransition))

    def test_both_slides_transitions(self):
        # current slide has transition out, and new slide has transition, so
        # transition of new slide takes precendence

        # show a slide, no expire, but with transition out
        self.assertEqual(
            self.mc.slides['machine_slide_8']['transition_out']['type'],
            'wipe')
        self.mc.events.post('show_slide_8')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_8')

        # show a new slide with a different transition in
        self.assertEqual(
            self.mc.slides['machine_slide_9']['transition']['type'], 'move_in')
        self.mc.events.post('show_slide_9')
        self.advance_time()

        # transition from second slide should be happening
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   MoveInTransition))

    def test_transition_in_slide_player(self):
        # transition is specified in slide player for slide that does not have
        # transition

        # show a base slide with no transition
        self.assertIsNone(self.mc.slides['machine_slide_4']['transition'])
        self.mc.events.post('machine_slide_4')
        self.advance_time()

        # show a second slide where the slide has no transition, but the
        # slide player does have a transition
        self.assertIsNone(self.mc.slides['machine_slide_5']['transition'])
        self.mc.events.post('show_slide_5_with_transition')
        self.advance_time()

        # make sure the transition is happening
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   FadeTransition))

    def test_transition_in_slide_player_override(self):
        # transition in slide player for slide that already has a transition.
        # the slide player transition should override the slide one

        # show a base slide with no transition
        self.assertIsNone(self.mc.slides['machine_slide_4']['transition'])
        self.mc.events.post('machine_slide_4')
        self.advance_time()

        # show a second slide where the slide has a transition, but the
        # slide player has a different transition, so the slide player
        # should take precedence
        self.assertEqual(
            self.mc.slides['machine_slide_9']['transition']['type'], 'move_in')
        self.mc.events.post('show_slide_5_with_transition')
        self.advance_time()

        # make sure the transition from the slide player is happening
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   FadeTransition))

    def test_slide_show(self):
        # tests the 'show' feature of a slide. This is not a slide show, but
        # rather a setting which controls whether a slide is shown right away
        # or not

        # show a base slide
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # post new slide, but with show=False, so it should not show
        self.mc.events.post('slide_2_dont_show')
        self.advance_time()

        # Should still be slide 1
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

    def test_slide_removal(self):
        """Slides post created/active/removed events through their lifecycle."""
        # Also test slide events
        self.mock_event('slide_machine_slide_1_active')
        self.mock_event('slide_machine_slide_1_created')
        self.mock_event('slide_machine_slide_1_removed')
        self.mock_event('slide_machine_slide_4_active')
        self.mock_event('slide_machine_slide_4_created')
        self.mock_event('slide_machine_slide_4_removed')

        # show a base slide
        self.mc.events.post('show_slide_1')
        self.advance_time(0.3)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')
        self.assertEventCalled('slide_machine_slide_1_created')
        self.assertEventCalled('slide_machine_slide_1_active')
        self.assertEventNotCalled('slide_machine_slide_1_removed')
        self.assertEventNotCalled('slide_machine_slide_4_created')
        self.assertEventNotCalled('slide_machine_slide_4_active')
        self.assertEventNotCalled('slide_machine_slide_4_removed')

        # show another slide
        self.mc.events.post('show_slide_4')
        self.advance_time(0.3)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_4')
        self.assertEventCalled('slide_machine_slide_1_created', 1)
        self.assertEventCalled('slide_machine_slide_1_active', 1)
        self.assertEventNotCalled('slide_machine_slide_1_removed')
        self.assertEventCalled('slide_machine_slide_4_created', 1)
        self.assertEventCalled('slide_machine_slide_4_active', 1)
        self.assertEventNotCalled('slide_machine_slide_4_removed')

        # make sure base slide comes back
        self.mc.events.post('remove_slide_4')
        self.advance_time(0.3)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')
        self.assertEventCalled('slide_machine_slide_1_created', 1)
        self.assertEventCalled('slide_machine_slide_1_active', 2)
        self.assertEventNotCalled('slide_machine_slide_1_removed')
        self.assertEventCalled('slide_machine_slide_4_created', 1)
        self.assertEventCalled('slide_machine_slide_4_active', 1)
        self.assertEventCalled('slide_machine_slide_4_removed', 1)

    def test_slide_removal_new_transition(self):
        """Removing a slide can specify its own out-transition."""
        # show a base slide
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # show a slide with not transition out
        self.assertIsNone(self.mc.slides['machine_slide_4']['transition_out'])
        self.mc.events.post('show_slide_4')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_4')

        # remove that slide with a transition
        self.mc.events.post('remove_slide_4_with_transition')
        self.advance_time(.1)

        # make sure the transition is taking effect
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   WipeTransition))

        # original slide is back
        self.advance_time(1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

    def test_slide_removal_existing_transition(self):
        """Removing a slide falls back to the slide's own transition_out."""
        # show a base slide
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # show a slide which has a transition out
        self.assertEqual(
            self.mc.slides['machine_slide_8']['transition_out']['type'],
            'wipe')
        self.mc.events.post('show_slide_8')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_8')

        # post an event which does not have a transition
        self.mc.events.post('remove_slide_8')
        self.advance_time(.1)

        # make sure the transition is taking effect
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   WipeTransition))

        # original slide is back
        self.advance_time(1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

    def test_slide_removal_override_transition(self):
        """A removal transition overrides the slide's own transition_out."""
        # show a base slide
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # show a slide which has a wipe transition
        self.assertEqual(
            self.mc.slides['machine_slide_8']['transition_out']['type'],
            'wipe')
        self.mc.events.post('show_slide_8')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_8')

        # remove slide with a fade transition
        self.mc.events.post('remove_slide_8_fade')
        self.advance_time(.1)

        # make sure it uses the fade transition from the slide player
        self.assertTrue(isinstance(self.mc.targets['display1'].transition,
                                   FadeTransition))

        # original slide should be back
        self.advance_time(1)
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

    def test_removing_last_slide(self):
        """Removing the only slide falls back to the blank slide."""
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'machine_slide_1')
        self.advance_time()
        self.mc.targets['default'].remove_slide('machine_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'display1_blank')
        self.assertEqual(1, len(self.mc.targets['default'].screens))

    def test_expire_non_current_slide(self):
        """A slide expires even while it is not the current slide."""
        self.mc.events.post('slide1_expire_1s')
        self.advance_time(.1)
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'machine_slide_1')

        # show slide 2 which should expire in 1s
        self.mc.events.post('slide2_expire_1s')
        self.advance_time(.1)
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'machine_slide_2')

        self.advance_time(1)

        # should be back to blank, because slide1 expired while slide 2 was up
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'display1_blank')
        self.assertEqual(1, len(self.mc.targets['default'].screens))

    def test_remove_already_removed_slide(self):
        """Calling remove() on an already-removed slide does not crash."""
        self.mc.events.post('slide1_expire_1s')
        self.advance_time(.1)
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'machine_slide_1')

        # grab a reference to this slide
        slide1 = self.mc.targets['default'].current_slide

        self.advance_time(1)

        # should be blank, because slide1 expired
        self.assertEqual(self.mc.targets['default'].current_slide_name,
                         'display1_blank')
        self.assertEqual(1, len(self.mc.targets['default'].screens))

        # now try to call the now-gone slide's remove
        slide1.remove()
        self.advance_time()

    def test_animation_triggers(self):
        """Animation events are registered as BCP triggers on client connect."""
        bcp_command1 = ('register_trigger', None, {'event': 'flash_widget_1'})
        bcp_command2 = ('register_trigger', None, {'event': 'flash_widget_2'})
        self.assertNotIn(bcp_command1, self.sent_bcp_commands)
        self.assertNotIn(bcp_command2, self.sent_bcp_commands)
        self.mc.events.post("client_connected")
        self.advance_time()
        self.assertIn(bcp_command1, self.sent_bcp_commands)
        self.assertIn(bcp_command2, self.sent_bcp_commands)

    def test_play_multiple_times(self):
        """Replaying the same slide repeatedly does not leak old instances."""
        # set a baseline slide
        self.mc.events.post('show_slide_1')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name,
                         'machine_slide_1')

        # start the mode and then post that event again. The slide should
        # switch
        self.mc.modes['mode1'].start()
        self.mc.events.post('show_mode1_slide')
        self.advance_time()
        self.assertEqual(self.mc.targets['display1'].current_slide_name, 'mode1_slide')
        slide = weakref.ref(self.mc.targets['display1'].current_slide)

        for i in range(10):
            self.mc.events.post('show_mode1_slide')
            self.advance_time()
            self.assertEqual(self.mc.targets['display1'].current_slide_name, 'mode1_slide')
            # run garbage collector
            gc.collect()
            # weak ref to the slide should be none
            self.assertIsNone(slide())
            # build weak ref to curent slide
            slide = weakref.ref(self.mc.targets['display1'].current_slide)
class TestMpfSlidePlayer(MpfTestCase):
    """Runs the MPF tests (and not the MPF-MC ones) to exercise the MPF side
    of the slide player plugin."""

    def __init__(self, methodName):
        super().__init__(methodName)
        # remove config patch which disables bcp
        del self.machine_config_patches['bcp']

    def getAbsoluteMachinePath(self):
        # override the base so the machine path is resolved relative to the
        # mpfmc package location
        return os.path.abspath(os.path.join(
            mpfmc.core.__path__[0], os.pardir, self.getMachinePath()))

    def get_enable_plugins(self):
        return True

    def getConfigFile(self):
        return 'test_slide_player.yaml'

    def getMachinePath(self):
        return 'tests/machine_files/slide_player/'

    # todo add tests
| 2.125 | 2 |
setup.py | electrumsv/libcuckoofilter | 0 | 12766293 | from distutils.core import setup, Extension
import sys
# C extension wrapping the cuckoo filter implementation.
module1 = Extension('bsvcuckoo',
                    include_dirs=['include'],
                    sources=["src/cuckoo_filter.c", "src/cuckoo_python.c"],
                    # https://cibuildwheel.readthedocs.io/en/stable/faq/#windows-importerror-dll-load-failed-the-specific-module-could-not-be-found
                    extra_compile_args=['/d2FH4-'] if sys.platform == 'win32' else [])

# BUGFIX: read the long description with an explicit encoding and close the
# file handle, instead of leaking it via a bare open().read().
with open('README.md', 'r', encoding='utf-8') as readme:
    long_description = readme.read()

setup(name='bsvcuckoo',
      version='1.3',
      description='A cuckoo filter implementation.',
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/electrumsv/libcuckoofilter',
      long_description=long_description,
      long_description_content_type='text/markdown',
      license='MIT Licence',
      # This warns about no `__init__.py` file but seems to install workable types.
      packages=['bsvcuckoo-stubs'],
      package_data={"bsvcuckoo-stubs": ['__init__.pyi']},
      # The actual package.
      ext_modules=[ module1 ])
| 1.765625 | 2 |
testCloudygrids.py | zpace/manga_nebular | 0 | 12766294 | from importer import *
import sys, os
# Make the stellar-mass PCA package importable from its hard-coded location.
sys.path.append('/usr/data/minhas/zpace/stellarmass_pca')
import read_results as from_pca
# Base directory holding the pre-computed PCA results for MaNGA galaxies.
pca_basedir = '/usr/data/minhas2/zpace/CSPs/CSPs_CKC14_MaNGA_20181026-1'
import numpy as np
import matplotlib.pyplot as plt
import pymc3
import manga_tools as m
import metallicity
import pi_grid
import abundances
from extinction import fitzpatrick99
from astropy.cosmology import WMAP9 as cosmo

# Load the CloudyFSPS photoionization model grid (emission-line predictions
# over logZ / logU / age) and fit its Gaussian-process interpolator.
cloudy_fsps_grid = pi_grid.load_CloudyFSPS_grid(
    linenames_fname='./data/cloudyFSPS/linenames.dat',
    data_fname='./data/cloudyFSPS/ZAU_ND_mist.lines',
    yaml_cfg_fname='./data/cloudyFSPS/cloudyFSPS.yaml',
    elines_tab_key='CloudyFSPS-name', elines_table=pi_grid.elines_table,
    lines_used=pi_grid.default_lines)
cloudy_fsps_grid.learnspace_GP()

# Vacuum wavelengths of the emission lines the grid can predict.
elines = pi_grid.elines_table.copy()
elines.add_index('name')
line_ls = elines.loc[cloudy_fsps_grid.observable_names]['lvac']
ntest = 1
'''
logZ_real = np.random.uniform(*cloudy_fsps_grid.range('logZ'), ntest)
logU_real = np.random.uniform(*cloudy_fsps_grid.range('logU'), ntest)
age_real = np.random.uniform(*cloudy_fsps_grid.range('Age'), ntest)
AV_real = np.random.exponential(1., ntest)
logQH_real = np.random.uniform(48.5, 51., ntest)
linelums_real = 10.**logQH_real[:, None] * cloudy_fsps_grid.predict(
np.stack([logZ_real, logU_real, age_real], axis=0))
extinction_at_AV1 = fitzpatrick99(wave=line_ls, a_v=1., r_v=3.1)
A_lambda = np.outer(AV_real, extinction_at_AV1)
atten = 10.**(-0.4 * A_lambda)
zdist = .0155
distmod = (4. * np.pi * cosmo.luminosity_distance(zdist)**2.).to('cm2').value
linefluxes_real = linelums_real * atten / distmod / 1.0e-17
snr = np.random.uniform(2., 50., linefluxes_real.shape)
real_unc = linefluxes_real / snr
unc_factor = np.e
linefluxes_noise = real_unc * np.random.randn(*linefluxes_real.shape)
linefluxes_obs = linefluxes_real + linefluxes_noise
obs_unc = real_unc / unc_factor
mask_obs = np.any(linefluxes_obs < 0., axis=1)
print(linefluxes_obs.shape)
'''
'''
fakemodel, faketrace = metallicity.find_ism_params(
grid=cloudy_fsps_grid, dustlaw=fitzpatrick99,
line_obs=[linefluxes_obs[~mask_obs], obs_unc[~mask_obs], mask_obs[~mask_obs]],
line_ls=line_ls, drpall_row={'nsa_zdist': zdist})
'''
#####
# Pick one galaxy (plate-IFU 9497-9101) out of the MaNGA DRP catalog.
drpall = m.load_drpall(metallicity.mpl_v)
drpall.add_index('plateifu')
drpall_row = drpall.loc['9497-9101']
plate, ifu = drpall_row['plateifu'].split('-')
# Load its measured emission-line maps from the DAP products.
# NOTE(review): `mpl_v` is used unqualified here but `metallicity.mpl_v`
# above -- presumably `from importer import *` provides it; confirm.
el = metallicity.Elines.DAP_from_plateifu(
    plate, ifu, mpl_v, 'SPX-GAU-MILESHC', data_colname='MPL-6-name',
    lines_used=cloudy_fsps_grid.observable_names, elines_table=elines)
# Matching stellar-population PCA results for the same galaxy.
pcares = from_pca.PCAOutput.from_plateifu(
    basedir=os.path.join(pca_basedir, 'results'), plate=plate, ifu=ifu)
#'''
# Fit ISM parameters (metallicity, ionization, extinction, ...) in radial
# bins between 0.5 and 2 effective radii.
model, trace, f, unc, Rreff = metallicity.find_ism_params(
    grid=cloudy_fsps_grid, dustlaw=fitzpatrick99,
    obs=el, pca_result=pcares, line_ls=line_ls, drpall_row=drpall_row,
    nrad=5, m_at_rad=3, rlim=[0.5, 2.])
#'''
# Profile the model's log-posterior evaluation for performance diagnostics.
model.profile(model.logpt).summary()
| 1.726563 | 2 |
simpleTicket/siteEngine/migrations/0005_auto_20160622_1028.py | abogeorge/simpleTicket | 0 | 12766295 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-22 07:28
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    # Auto-generated migration: adds the RoleAdmin multi-table subclass of
    # Role, and makes UserProfile.user_auth a OneToOneField on the
    # configured auth user model.

    dependencies = [
        ('siteEngine', '0004_auto_20160621_1445'),
    ]

    operations = [
        migrations.CreateModel(
            name='RoleAdmin',
            fields=[
                ('role_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='siteEngine.Role')),
            ],
            bases=('siteEngine.role',),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user_auth',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 1.625 | 2 |
quora/wsgi.py | alexricheburton/gittest | 116 | 12766296 | import os
# Point Django at the project settings before any Django machinery loads.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "quora.settings")

from django.core.wsgi import get_wsgi_application
from dj_static import Cling
from whitenoise.django import DjangoWhiteNoise

# NOTE(review): Cling (dj-static) and WhiteNoise both serve static files;
# layering WhiteNoise over Cling looks redundant -- confirm which one is
# actually intended for static-file serving.
application = Cling(get_wsgi_application())
application = DjangoWhiteNoise(application)
| 1.398438 | 1 |
notifier/grabbers/better_advice.py | thejeshpr/notifier | 0 | 12766297 | import urllib.parse
from notifier.grabbers.base import Base, Internet
class BetterAdvice(object):
    """Grabber that scrapes article links from the Better Advice site and
    queues one text task per article."""

    @staticmethod
    def sync(obj: Base, *args, **kwargs):
        """Fetch the landing page of ``obj.sync_type.base_url`` and add a
        text task for every article anchor, oldest link first."""
        base_url = obj.sync_type.base_url
        response = Internet.html_get(base_url)
        anchors = response.html.xpath('/html/body/div[*]/div[*]/div/div[*]/div[*]/section/div[*]/div[*]/div[*]/a')
        # Walk the anchors in reverse so older articles are queued first.
        for anchor in reversed(anchors):
            # Strip any query string before resolving against the base URL.
            href = anchor.attrs.get('href').split("?")[0]
            link = urllib.parse.urljoin(base_url, href)
            obj.add_text_task(
                unique_key=link,
                name=anchor.text.strip(),
                url=link,
                data={'text': link},
            )
| 2.5 | 2 |
resnet.py | Akihiro-A-Fujii/tf__resnet | 6 | 12766298 | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Activation, Dense,Flatten, Add, TimeDistributed, Flatten, BatchNormalization
from tensorflow.keras.layers import Conv1D,MaxPooling1D,GlobalAveragePooling1D, GlobalMaxPooling1D
from tensorflow.keras.layers import Conv2D,MaxPooling2D,GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.layers import Conv3D,MaxPooling3D,GlobalAveragePooling3D, GlobalMaxPooling3D
from tensorflow.keras.layers import Layer
from .groupnorm import GroupNormalization
def define_NormLayers(norm):
    """Map a normalization name ("BatchNorm"/"GroupNorm") to its layer class."""
    norm_classes = {
        "BatchNorm": BatchNormalization,
        "GroupNorm": GroupNormalization,
    }
    if norm not in norm_classes:
        raise Exception("Normalization that you specify is invalid! Current value:", norm)
    return norm_classes[norm]
def define_ConvLayer(mode):
    """Map a dimensionality mode ("1D"/"2D"/"3D"/"TimeD") to a Conv class.

    "TimeD" uses Conv2D; callers wrap it in TimeDistributed themselves.
    """
    conv_classes = {
        "2D": Conv2D,
        "TimeD": Conv2D,
        "1D": Conv1D,
        "3D": Conv3D,
    }
    if mode not in conv_classes:
        raise Exception("Convolution mode that you specify is invalid! Current value:", mode)
    return conv_classes[mode]
def define_Pooling(mode):
    """Map a dimensionality mode to the matching MaxPooling class.

    "TimeD" uses MaxPooling2D; callers wrap it in TimeDistributed themselves.
    """
    pooling_classes = {
        "2D": MaxPooling2D,
        "TimeD": MaxPooling2D,
        "1D": MaxPooling1D,
        "3D": MaxPooling3D,
    }
    if mode not in pooling_classes:
        raise Exception("Convolution mode that you specify is invalid! Current value:", mode)
    return pooling_classes[mode]
def define_GlobalPooling(mode, pooling):
    """Map (mode, pooling) to the matching global pooling layer class.

    ``mode`` is "1D"/"2D"/"3D"/"TimeD" (TimeD shares the 2D classes) and
    ``pooling`` is "max" or "ave".

    Fix relative to the original: an unrecognised combination used to fall
    off the end of the if/elif chain and silently return None, which later
    surfaced as a confusing ``'NoneType' object is not callable`` when the
    caller instantiated the layer.  It now raises immediately.
    """
    pooling_classes = {
        ("2D", "max"): GlobalMaxPooling2D,
        ("TimeD", "max"): GlobalMaxPooling2D,
        ("1D", "max"): GlobalMaxPooling1D,
        ("3D", "max"): GlobalMaxPooling3D,
        ("2D", "ave"): GlobalAveragePooling2D,
        ("TimeD", "ave"): GlobalAveragePooling2D,
        ("1D", "ave"): GlobalAveragePooling1D,
        ("3D", "ave"): GlobalAveragePooling3D,
    }
    key = (mode, pooling)
    if key not in pooling_classes:
        raise Exception("Global pooling that you specify is invalid! Current values:", mode, pooling)
    return pooling_classes[key]
class Conv_stage1_block(tf.keras.Model):
    """ResNet stem (stage 1): wide conv -> normalization -> ReLU -> max pool.

    Supports 1D/2D/3D convolutions, or 2D layers wrapped in TimeDistributed
    ("TimeD" mode) for per-timestep processing of sequence inputs.
    """
    def __init__(self, filters, strides=2, mode="2D", norm="BatchNorm",kernel_initializer='he_normal',name=None):
        super(Conv_stage1_block, self).__init__(name=name)
        NormLayer = define_NormLayers(norm) # Define Normalization Layers
        ConvLayer = define_ConvLayer(mode) #Define ConvLayer
        MaxPooling = define_Pooling(mode) # Define Pooling
        if mode=="1D" or mode=="2D" or mode=="3D":
            # 7-wide stem convolution; stride 2 halves spatial resolution.
            self.conv1 = ConvLayer(filters, kernel_size=7,strides=strides,kernel_initializer=kernel_initializer, padding='same')
            self.bn1 = NormLayer()
            self.act1 = Activation('relu')
            self.pool1 = MaxPooling(pool_size=3, strides=2,padding="same")
        elif mode=="TimeD":
            # Same stem, applied independently to every timestep.
            self.conv1 = TimeDistributed(ConvLayer(filters, kernel_size=7,kernel_initializer=kernel_initializer,strides=strides, padding='same'))
            self.bn1 = TimeDistributed(NormLayer())
            self.act1 = TimeDistributed(Activation('relu'))
            self.pool1 = TimeDistributed(MaxPooling(pool_size=(3,3), strides=(2,2),padding="same"))
    def call(self, x):
        # conv -> norm -> relu -> pool
        h = self.conv1(x)
        h = self.bn1(h)
        h = self.act1(h)
        output = self.pool1(h)
        return output
class Identity_bottleneck_block(tf.keras.Model):
    def __init__(self, filters, kernel_size=3, mode="2D", norm="BatchNorm",kernel_initializer='he_normal' ,name=None):
        """Bottleneck residual block with an identity (no-conv) shortcut.

        Main path: 1-wide conv -> k-wide conv -> 1-wide conv, each followed
        by normalization and ReLU; the input is added back unchanged, so the
        block's output channel count must equal its input channel count.

        # Arguments
            filters: list of 3 integers, the filters of the 3 conv layers
                on the main path
            kernel_size: default 3, the kernel size of the middle conv layer
            mode: "1D"/"2D"/"3D", or "TimeD" for TimeDistributed 2D layers
            norm: "BatchNorm" or "GroupNorm"
        # Returns
            Output tensor for the block.
        """
        super(Identity_bottleneck_block, self).__init__(name=name)
        NormLayer = define_NormLayers(norm) # Define Normalization Layers
        ConvLayer = define_ConvLayer(mode)
        filters1, filters2, filters3 = filters
        if mode=="1D" or mode=="2D" or mode=="3D":
            self.conv1 = ConvLayer(filters1, 1, kernel_initializer=kernel_initializer,padding='same')
            self.bn1 = NormLayer()
            self.relu1 = Activation('relu')
            self.conv2 = ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same')
            self.bn2 = NormLayer()
            self.relu2 = Activation('relu')
            self.conv3 = ConvLayer(filters3, 1, kernel_initializer=kernel_initializer,padding='same')
            self.bn3 = NormLayer()
            self.relu3 = Activation('relu')
        elif mode=="TimeD":
            self.conv1 = TimeDistributed(ConvLayer(filters1, (1,1), kernel_initializer=kernel_initializer,padding='same'))
            self.bn1 = TimeDistributed(NormLayer())
            self.relu1 = TimeDistributed(Activation('relu'))
            self.conv2 = TimeDistributed(ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same'))
            self.bn2 = TimeDistributed(NormLayer())
            self.relu2 = TimeDistributed(Activation('relu'))
            self.conv3 = TimeDistributed(ConvLayer(filters3, (1,1), kernel_initializer=kernel_initializer,padding='same'))
            self.bn3 = TimeDistributed(NormLayer())
            self.relu3 = TimeDistributed(Activation('relu'))
        self.add = Add()
    def call(self, x):
        residual = x
        # Main path: three conv -> norm -> relu stages.
        h = self.conv1(x)
        h = self.bn1(h)
        h = self.relu1(h)
        h = self.conv2(h)
        h = self.bn2(h)
        h = self.relu2(h)
        h = self.conv3(h)
        h = self.bn3(h)
        h = self.relu3(h)
        # Merge: add the untouched input back onto the main path.
        output = self.add([residual, h])
        return output
class Conv_bottleneck_block(tf.keras.Model):
    def __init__(self,filters, kernel_size=3, strides=2, mode="2D",norm="BatchNorm",kernel_initializer='he_normal' , name=None):
        """Bottleneck residual block with a projection (conv) shortcut.

        Like Identity_bottleneck_block, but the first conv (and the shortcut
        conv) apply ``strides`` so the block can downsample and/or change the
        channel count between stages.

        # Arguments
            filters: list of 3 integers, the filters of the 3 conv layers
                on the main path (the third also sizes the shortcut conv)
            kernel_size: default 3, the kernel size of the middle conv layer
            strides: stride of the first main-path conv and the shortcut conv
            mode: "1D"/"2D"/"3D", or "TimeD" for TimeDistributed 2D layers
            norm: "BatchNorm" or "GroupNorm"
        # Returns
            Output tensor for the block.
        """
        super(Conv_bottleneck_block, self).__init__(name=name)
        NormLayer = define_NormLayers(norm) # Define Normalization Layers
        ConvLayer = define_ConvLayer(mode) # Define ConvLayer
        filters1, filters2, filters3 = filters
        if mode=="1D" or mode=="2D" or mode=="3D":
            # Left (main path)
            self.bn1 = NormLayer()
            self.relu1 = Activation('relu')
            self.conv1 = ConvLayer(filters1, 1, strides=strides,kernel_initializer=kernel_initializer,padding='same')
            self.bn2 = NormLayer()
            self.relu2 = Activation('relu')
            self.conv2 = ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same')
            self.bn3 = NormLayer()
            self.relu3 = Activation('relu')
            self.conv3 = ConvLayer(filters3, 1, kernel_initializer=kernel_initializer,padding='same')
            # Right (projection shortcut): matches shape of the main path.
            self.s_bn = NormLayer()
            self.s_conv = ConvLayer(filters3, 1, strides=strides,
                                    kernel_initializer=kernel_initializer,padding='same')
        elif mode == "TimeD":
            # Left (main path)
            self.bn1 = TimeDistributed(NormLayer())
            self.relu1 = TimeDistributed(Activation('relu'))
            self.conv1 = TimeDistributed(ConvLayer(filters1, (1,1), strides=strides,kernel_initializer=kernel_initializer,padding='same'))
            self.bn2 = TimeDistributed(NormLayer())
            self.relu2 = TimeDistributed(Activation('relu'))
            self.conv2 = TimeDistributed(ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same'))
            self.bn3 = TimeDistributed(NormLayer())
            self.relu3 = TimeDistributed(Activation('relu'))
            self.conv3 = TimeDistributed(ConvLayer(filters3, (1,1), kernel_initializer=kernel_initializer,padding='same'))
            # Right (projection shortcut)
            self.s_bn = TimeDistributed(NormLayer())
            self.s_conv = TimeDistributed(ConvLayer(filters3, (1,1), strides=strides, kernel_initializer=kernel_initializer,padding='same'))
        self.add = Add()
    def call(self, x):
        residual = x
        # Left: three conv -> norm -> relu stages.
        h = self.conv1(x)
        h = self.bn1(h)
        h = self.relu1(h)
        h = self.conv2(h)
        h = self.bn2(h)
        h = self.relu2(h)
        h = self.conv3(h)
        h = self.bn3(h)
        h = self.relu3(h)
        # Right: project the input to the same shape as the main path.
        residual = self.s_conv(residual)
        residual = self.s_bn(residual)
        # Merge
        output = self.add([residual, h])
        return output
class Identity_basic_block(tf.keras.Model):
    def __init__(self, filters,kernel_size=3, mode="2D", norm="BatchNorm",kernel_initializer='he_normal' , name=None):
        """Basic (two-conv) residual block with an identity shortcut.

        Used by ResNet18/34.  The input is added back unchanged, so the
        block's output channel count must equal its input channel count.

        # Arguments
            filters: list of 2 integers, the filters of the 2 conv layers
                on the main path
            kernel_size: default 3, the kernel size of both conv layers
            mode: "1D"/"2D"/"3D", or "TimeD" for TimeDistributed 2D layers
            norm: "BatchNorm" or "GroupNorm"
        # Returns
            Output tensor for the block.
        """
        super(Identity_basic_block, self).__init__(name=name)
        NormLayer = define_NormLayers(norm) # Define Normalization Layers
        ConvLayer = define_ConvLayer(mode) # Define ConvLayer
        filters1, filters2 = filters
        if mode=="1D" or mode=="2D" or mode=="3D":
            self.bn1 = NormLayer()
            self.relu1 = Activation('relu')
            self.conv1 = ConvLayer(filters1, kernel_size, kernel_initializer=kernel_initializer,padding='same')
            self.bn2 = NormLayer()
            self.relu2 = Activation('relu')
            self.conv2 = ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same')
        elif mode=="TimeD":
            self.bn1 = TimeDistributed(NormLayer())
            self.relu1 = TimeDistributed(Activation('relu'))
            self.conv1 = TimeDistributed(ConvLayer(filters1, kernel_size, kernel_initializer=kernel_initializer,padding='same'))
            self.bn2 = TimeDistributed(NormLayer())
            self.relu2 = TimeDistributed(Activation('relu'))
            self.conv2 = TimeDistributed(ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same'))
        self.add = Add()
    def call(self, x):
        residual = x
        # Main path: two conv -> norm -> relu stages.
        h = self.conv1(x)
        h = self.bn1(h)
        h = self.relu1(h)
        h = self.conv2(h)
        h = self.bn2(h)
        h = self.relu2(h)
        # Merge: add the untouched input back onto the main path.
        output = self.add([residual, h])
        return output
class Conv_basic_block(tf.keras.Model):
    def __init__(self,filters, kernel_size=3, strides=2, mode="2D", norm="BatchNorm",kernel_initializer='he_normal', name=None):
        """Basic (two-conv) residual block with a projection (conv) shortcut.

        Used by ResNet18/34 at stage boundaries: the first main-path conv
        and the shortcut conv apply ``strides`` to downsample and/or change
        the channel count.

        # Arguments
            filters: list of 2 integers, the filters of the 2 conv layers
                on the main path (the second also sizes the shortcut conv)
            kernel_size: default 3, the kernel size of the second conv layer
            strides: stride of the first main-path conv and the shortcut conv
            mode: "1D"/"2D"/"3D", or "TimeD" for TimeDistributed 2D layers
            norm: "BatchNorm" or "GroupNorm"
        # Returns
            Output tensor for the block.
        """
        super(Conv_basic_block, self).__init__(name=name)
        NormLayer = define_NormLayers(norm) # Define Normalization Layers
        ConvLayer = define_ConvLayer(mode) # Define ConvLayer
        filters1, filters2 = filters
        if mode=="1D" or mode=="2D" or mode=="3D":
            # Left (main path)
            self.bn1 = NormLayer()
            self.relu1 = Activation('relu')
            self.conv1 = ConvLayer(filters1, 1, strides=strides,kernel_initializer=kernel_initializer,padding='same')
            self.bn2 = NormLayer()
            self.relu2 = Activation('relu')
            self.conv2 = ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same')
            # Right (projection shortcut)
            self.s_bn = NormLayer()
            self.s_conv = ConvLayer(filters2, 1, strides=strides,kernel_initializer=kernel_initializer,padding='same')
        elif mode=="TimeD":
            # Left (main path)
            self.bn1 = TimeDistributed(NormLayer())
            self.relu1 = TimeDistributed(Activation('relu'))
            self.conv1 = TimeDistributed(ConvLayer(filters1, (1,1), strides=strides,kernel_initializer=kernel_initializer,padding='same'))
            self.bn2 = TimeDistributed(NormLayer())
            self.relu2 = TimeDistributed(Activation('relu'))
            self.conv2 = TimeDistributed(ConvLayer(filters2, kernel_size, kernel_initializer=kernel_initializer,padding='same'))
            # Right (projection shortcut)
            self.s_bn = TimeDistributed(NormLayer())
            self.s_conv = TimeDistributed(ConvLayer(filters2, (1,1), strides=strides,kernel_initializer=kernel_initializer,padding='same'))
        self.add = Add()
    def call(self, x):
        residual = x
        # Left: two conv -> norm -> relu stages.
        # BUGFIX: the original applied bn2/relu2 *before* conv2 (leaving
        # conv2's output un-normalized), inconsistent with every sibling
        # block (Identity_basic_block, Conv/Identity_bottleneck_block),
        # which all use conv -> norm -> relu ordering throughout.
        h = self.conv1(x)
        h = self.bn1(h)
        h = self.relu1(h)
        h = self.conv2(h)
        h = self.bn2(h)
        h = self.relu2(h)
        # Right: project the input to the same shape as the main path.
        residual = self.s_conv(residual)
        residual = self.s_bn(residual)
        # Merge
        output = self.add([residual, h])
        return output
class Fin_layer(tf.keras.Model):
    """Network head: global pooling, optionally followed by a softmax
    classifier when ``include_top`` is True.

    NOTE(review): the default ``pooling='avg'`` does not match the
    'ave'/'max' values understood by define_GlobalPooling; ResnetBuilder
    always passes a validated value, but the bare default looks wrong --
    confirm before instantiating Fin_layer directly.
    """
    def __init__(self,mode="2D", class_num=1000, include_top=True, pooling='avg', name=None):
        super(Fin_layer, self).__init__(name=name)
        self.include_top = include_top
        self.mode=mode
        GlobalPooling = define_GlobalPooling(mode, pooling)
        if mode=="1D" or mode=="2D" or mode=="3D":
            #Pooling setting
            self.gp = GlobalPooling()
            if self.include_top:
                self.dense = Dense(class_num, 'softmax')
        elif mode=="TimeD":
            self.gp = TimeDistributed(GlobalPooling())
            if self.include_top:
                self.flat = Flatten()
                self.dense = Dense(class_num, 'softmax')
    def call(self, x):
        output = self.gp(x)
        if self.include_top and (self.mode=="1D" or self.mode=="2D" or self.mode=="3D"):
            output = self.dense(output)
        if self.include_top and self.mode=="TimeD":
            # Per-timestep pooled features are flattened before the classifier.
            output = self.flat(output)
            output = self.dense(output)
        return output
class ResnetBuilder(tf.keras.Model):
    """Configurable ResNet (18/34/50/101/152) supporting 1D/2D/3D inputs,
    or 2D convolutions wrapped in TimeDistributed ("TimeD" mode).

    The architecture is selected via ``name``; stages 2-5 each start with a
    projection (Conv*) block followed by identity blocks.

    Fixes relative to the original: removed a stray ``print("original")``
    debug statement from ``define_block_type`` and replaced ``!= None`` /
    ``== None`` comparisons with identity checks.
    """
    def __init__(self, class_num=1000, include_top=True, pooling='ave', mode="2D", norm="BatchNorm",kernel_initializer='he_normal', name=None):
        super(ResnetBuilder, self).__init__(name=name)
        # Validate configuration early with explicit error messages.
        if not (mode=="1D" or mode=="2D" or mode=="TimeD" or mode=="3D"):
            raise Exception("'mode' value is invalid. you should use '1D' or '2D' or '3D' or 'TimeD'. Current value :",mode)
        if not (pooling=="ave" or pooling=="max" or pooling is None):
            raise Exception("'pooling' value is invalid. you should use 'ave' or 'max' or None. Current value :",pooling)
        if not (include_top==True or include_top==False):
            raise Exception("'include_top' value is invalid. you should use bool value. Current value :",include_top)
        self.pooling = pooling
        # Architecture table: per-stage filters, residual-block flavour and
        # how many blocks each stage repeats.
        if name == "ResNet18":
            self.stage_filters = [64, 128, 256, 512]
            self.block_type = "basic"
            self.reptitions = [2, 2, 2, 2]
        elif name == "ResNet34":
            self.stage_filters = [64, 128, 256, 512]
            self.block_type = "basic"
            self.reptitions = [3, 4, 6, 3]
        elif name=="ResNet50":
            self.stage_filters = [64, 128, 256, 512]
            self.block_type = "bottleneck"
            self.reptitions = [3, 4, 6, 3]
        elif name=="ResNet101":
            self.stage_filters = [64, 128, 256, 512]
            self.block_type = "bottleneck"
            self.reptitions = [3, 4, 23, 3]
        elif name=="ResNet152":
            self.stage_filters = [64, 128, 256, 512]
            self.block_type = "bottleneck"
            self.reptitions = [3, 8, 36, 3]
        else:
            raise Exception(" Name Error! you can use ResNet18,ResNet34,ResNet50,ResNet101, or ResNet152. Current name:",name)
        # Resolve block classes and expand the filter lists.
        self.define_block_type()
        # stage1: the stem (conv + pool).
        self.conv1 = Conv_stage1_block(filters=self.all_filters[0][0],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        # stage2: a projection block (stride 1 -- the stem already pooled)
        # followed by identity blocks.
        self.stage2_convs = {}
        self.stage2_convs[0] = self.ConvBlock(filters=self.all_filters[0],strides=1,mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        for rep in range(1,self.reptitions[0]):
            self.stage2_convs[rep] = self.IdBlock(filters=self.all_filters[0],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        # stage3
        self.stage3_convs = {}
        self.stage3_convs[0] = self.ConvBlock(filters=self.all_filters[1],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        for rep in range(1,self.reptitions[1]):
            self.stage3_convs[rep] = self.IdBlock(filters=self.all_filters[1],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        # stage4
        self.stage4_convs = {}
        self.stage4_convs[0] = self.ConvBlock(filters=self.all_filters[2],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        for rep in range(1,self.reptitions[2]):
            self.stage4_convs[rep] = self.IdBlock(filters=self.all_filters[2],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        # stage5
        self.stage5_convs = {}
        self.stage5_convs[0] = self.ConvBlock(filters=self.all_filters[3],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        for rep in range(1,self.reptitions[3]):
            self.stage5_convs[rep] = self.IdBlock(filters=self.all_filters[3],mode=mode,norm=norm,kernel_initializer=kernel_initializer)
        # Final layer (omitted entirely when pooling is None, so call()
        # returns the raw stage-5 feature map).
        if self.pooling is not None:
            self.fin = Fin_layer(mode=mode, include_top=include_top, class_num=class_num, pooling=self.pooling)
    def define_block_type(self):
        '''Select the residual block classes and expand the per-stage filter
        lists: "basic" blocks use two equal-width convs; "bottleneck" blocks
        use 1-k-1 convs with a 4x channel expansion on the last conv.
        '''
        if self.block_type=="basic":
            self.IdBlock = Identity_basic_block
            self.ConvBlock = Conv_basic_block
            self.all_filters = []
            for s_f in self.stage_filters:
                self.all_filters.append([s_f, s_f])
        elif self.block_type=="bottleneck":
            self.IdBlock = Identity_bottleneck_block
            self.ConvBlock = Conv_bottleneck_block
            self.all_filters = []
            for s_f in self.stage_filters:
                self.all_filters.append([s_f, s_f, s_f*4])
    def call(self, x):
        # stage1
        h = self.conv1(x)
        # stage2
        for rep in range(self.reptitions[0]):
            h = self.stage2_convs[rep](h)
        # stage3
        for rep in range(self.reptitions[1]):
            h = self.stage3_convs[rep](h)
        # stage4
        for rep in range(self.reptitions[2]):
            h = self.stage4_convs[rep](h)
        # stage5
        for rep in range(self.reptitions[3]):
            h = self.stage5_convs[rep](h)
        # Final stage: pooled/classified output, or the raw feature map.
        if self.pooling is not None:
            output = self.fin(h)
            return output
        else:
            return h
| 3.0625 | 3 |
src/sentry/api/endpoints/sentry_app_publish_request.py | lauryndbrown/sentry | 1 | 12766299 | <filename>src/sentry/api/endpoints/sentry_app_publish_request.py
from __future__ import absolute_import
from rest_framework.response import Response
from sentry import options
from sentry import features
from sentry.api.bases.sentryapps import SentryAppBaseEndpoint
from sentry.utils import email
class SentryAppPublishRequestEndpoint(SentryAppBaseEndpoint):
    def post(self, request, sentry_app):
        """Handle a request to publish an unpublished Sentry App by
        emailing the Sentry team; returns 201 on success."""
        # Feature gate: respond 404 (as if the endpoint doesn't exist)
        # when the owning organization lacks the sentry-apps feature.
        if not features.has('organizations:sentry-apps',
                            sentry_app.owner,
                            actor=request.user):
            return Response(status=404)
        # check status of app to make sure it is unpublished
        if sentry_app.is_published:
            return Response({'detail': 'Cannot publish already published integration'}, status=400)
        if sentry_app.is_internal:
            return Response({'detail': 'Cannot publish internal integration'}, status=400)
        # For now, just send an email that a request to publish has been made
        message = 'User %s of organization %s wants to publish %s' % (
            request.user.email, sentry_app.owner.slug, sentry_app.slug)
        # NOTE(review): recipient is a redacted placeholder ('<EMAIL>') --
        # restore the real distribution address before deploying.
        email.send_mail(
            'Sentry App Publication Request',
            message,
            options.get('mail.from'),
            ['<EMAIL>'],
            fail_silently=False
        )
        return Response(status=201)
| 2.3125 | 2 |
catalog/migrations/0001_initial.py | ashik4715/thanosback | 0 | 12766300 | <filename>catalog/migrations/0001_initial.py
# Generated by Django 3.0.1 on 2021-06-21 19:27

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):
    # Initial auto-generated migration: creates the Catalog table.
    # NOTE(review): `review` is an IntegerField but uses MaxLengthValidator /
    # MinLengthValidator, which validate *lengths*, not numeric ranges --
    # Max/MinValueValidator were probably intended.  Left unchanged here
    # because editing an applied migration would break migration history.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Catalog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, unique=True)),
                ('genre', models.CharField(max_length=1000)),
                ('bio', models.CharField(max_length=2500)),
                ('release_date', models.DateField(verbose_name='date released')),
                ('review', models.IntegerField(default=0, validators=[django.core.validators.MaxLengthValidator(100), django.core.validators.MinLengthValidator(1)])),
            ],
        ),
    ]
| 1.851563 | 2 |
example/ner/utils.py | tipevo/webstruct | 210 | 12766301 | <reponame>tipevo/webstruct
# -*- coding: utf-8 -*-
from functools import partial

from tqdm import tqdm

# Progress-bar factory preconfigured for page processing: reports counts in
# ' pages', disables rate smoothing, and leaves the finished bar on screen.
pages_progress = partial(tqdm, unit=' pages', smoothing=False, leave=True)
| 1.40625 | 1 |
plugins/haveibeenpwned/icon_haveibeenpwned/util/util.py | lukaszlaszuk/insightconnect-plugins | 0 | 12766302 | <filename>plugins/haveibeenpwned/icon_haveibeenpwned/util/util.py
import requests
import time
import random
from icon_haveibeenpwned.util.log_helper import LogHelper
class HaveIBeenPwned(object):
    """Minimal client for the Have I Been Pwned v2 API with back-off
    handling for rate limiting.

    Fixes relative to the original:
      * both 429 retry paths called ``self.get_request(url, params, ...)``,
        which passed ``params`` positionally into the ``key`` parameter and
        dropped the API key from the retried request; they now forward
        ``key`` and ``params`` correctly;
      * the ``Retry-After`` header value is a string and was used directly
        in ``time.sleep(retry + 0.100)`` (a TypeError); it is converted to
        an int first.
    """

    _HEADERS = {
        "User-Agent": "Rapid7 InsightConnect",
        "Accept": "application/vnd.haveibeenpwned.v2+json",
    }

    def __init__(self, logger=None):
        # Fall back to the plugin's default logger when none is supplied.
        if logger:
            self.logger = logger
        else:
            self.logger = LogHelper().logger
        # Number of back-off retries used so far; widens the random sleep
        # window on successive attempts.
        self._retries = 0

    def get_request(self, url: str, key: str, params=None, max_attempts=2) -> list:
        """
        Perform an authenticated GET against the HIBP API with back-off.

        :param url: specifies which haveibeenpwned api call is used
        :param key: HIBP API key sent in the ``hibp-api-key`` header
        :param params: used to filter searches
        :param max_attempts: how many times the plugin will retry if it receives a 429 error
        :return: decoded JSON list of results, or [] if nothing was found
        """
        self._HEADERS["hibp-api-key"] = key
        try:
            response = requests.get(url=url, headers=self._HEADERS, params=params)
        except requests.RequestException as e:
            self.logger.error(e)
            raise
        if response.status_code == 200:  # 200 Results found
            response_json = response.json()
            time.sleep(2)  # throttle follow-up calls to respect the rate limit
            return response_json
        elif response.status_code == 404:  # 404 Results were not found
            time.sleep(2)
            return []
        elif response.status_code == 429:  # too many requests from your IP
            if "Retry-After" in response.headers:
                # Header values are strings; convert before arithmetic/sleep.
                retry = int(response.headers["Retry-After"])
                # HIBP recommendation on adding an additional 100 millisecond delay between requests
                self.logger.info(
                    "Too many requests. The rate limit has been exceeded." f" Will retry after back off of: {retry} sec"
                )
                time.sleep(retry + 0.100)
                # Forward key and params on the retry (the original dropped
                # the key by passing params positionally).
                return self.get_request(url, key, params=params, max_attempts=0)  # Retry get_request
            else:
                # Just in case we don't get a Retry-After in the header
                if max_attempts > 0:
                    range_increase = 2**self._retries
                    self._retries = self._retries + 1
                    # set random time to wait
                    back_off = random.randrange(3, 5 + range_increase)  # nosec
                    self.logger.info(
                        "Too many requests. The rate limit has been exceeded."
                        f" Will retry after back off of: {back_off} sec"
                    )
                    time.sleep(back_off)  # Wait to slow down request rate
                    return self.get_request(url, key, params=params, max_attempts=max_attempts - 1)  # Retry get_request
                raise Exception(
                    "Too many requests. The rate limit has been exceeded. Back off has failed."
                    " Please run fewer workflows with the Have I been Pwned plugin"
                )
        elif response.status_code == 503:  # DDOS protection has flagged your IP for possible abuse
            raise Exception(
                "Warning: HTTP 503 status code received."
                " Have I Been Pwned has flagged this IP address as possibly abusive,"
                " and issued a 24-hour ban. Please discontinue use of the plugin for 24 hours"
                " and try again. If the issue persists, contact support."
            )
        else:
            self.logger.error(f"An unknown error occurred status code: {response.status_code}")
            raise Exception(f"{response.status_code} error")

    def get_password(self, hash_start: str) -> list:
        """
        Query the k-anonymity password range API.

        :param hash_start: The first 5 characters of a SHA1 hash
        :return: A list of full 40-character SHA1 hashes matching the prefix
        """
        BASE_URl = "https://api.pwnedpasswords.com/range/"
        url = BASE_URl + hash_start
        try:
            response = requests.get(url)
        except requests.RequestException as e:
            self.logger.error(e)
            raise
        # Each response line is "<35-char suffix>:<count>"; rebuild the full
        # hash from the prefix, then truncate to drop the trailing count.
        hash_list = response.text.splitlines()
        hash_list = [hash_start + hash_ for hash_ in hash_list]
        hash_list = [hash_[:40] for hash_ in hash_list]
        return hash_list
| 2.640625 | 3 |
topic_modeling.py | perkes/topic-modeling | 0 | 12766303 | <reponame>perkes/topic-modeling<filename>topic_modeling.py
from sklearn.decomposition import LatentDirichletAllocation as LDA
from sklearn.feature_extraction.text import TfidfVectorizer
from tqdm import tqdm
import pandas as pd
import numpy as np
import json, re
def topic_name(model, count_vectorizer, topics, topic_number, number_words):
    """Return a human-readable label for one LDA topic.

    The label is the topic's ``number_words`` strongest vocabulary terms
    joined by single spaces, strongest first.  Labels are memoized in the
    caller-supplied ``topics`` dict, keyed by topic number.
    """
    try:
        # Cache hit: this topic was already named.
        return topics[topic_number]
    except KeyError:
        pass
    vocabulary = count_vectorizer.get_feature_names()
    weights = model.components_[topic_number]
    # argsort is ascending; reverse it and keep the top `number_words`.
    strongest = weights.argsort()[::-1][:number_words]
    label = ' '.join(vocabulary[i] for i in strongest)
    topics[topic_number] = label
    return label
# Load run configuration (input/output paths, LDA hyperparameters) with a
# context manager; the original `json.load(open(...))` leaked the handle.
with open('config.json', 'r') as config_file:
    config = json.load(config_file)

comments = pd.read_csv(config['input_doc'])

# Model topics per group when configured, otherwise over the whole corpus.
review_groups = [(1, comments)]
if config['groupby']:
    review_groups = comments.groupby(config['groupby'])

all_comments = pd.DataFrame()
for group, frame in review_groups:
    # BUGFIX: the original discarded the punctuation-stripping pass by
    # immediately reassigning `reviews` from the raw column; chain the two
    # cleanup steps instead so punctuation removal actually takes effect.
    reviews = frame[config['input_column']].map(lambda x: re.sub('[,\.!?\*\$\'\"\(\)]', '', str(x)))
    reviews = reviews.map(lambda x: str(x).lower())
    count_vectorizer = TfidfVectorizer(min_df = config['min_df'], max_df = config['max_df'], stop_words = 'english')
    count_data = count_vectorizer.fit_transform(reviews)
    number_topics = config['topics_number']
    number_words = config['topics_n_words']
    lda = LDA(n_components = number_topics, doc_topic_prior = config['alpha'], topic_word_prior = config['beta'], n_jobs = -1, verbose = 1)
    transformed = lda.fit_transform(count_data)
    topics = {}
    # Label each document with the name of its dominant topic.
    frame[config['output_column']] = list(map(lambda x: topic_name(lda, count_vectorizer, topics, x.argmax(), number_words), tqdm(transformed)))
    # BUGFIX: name every topic explicitly (not just those that dominated
    # some document) -- the original `[topics[i] for i in range(len(topics))]`
    # raised KeyError / produced the wrong column count whenever a topic was
    # never the argmax for any document.
    column_names = [topic_name(lda, count_vectorizer, topics, i, number_words) for i in range(number_topics)]
    df = pd.DataFrame(transformed, columns = column_names)
    df.reset_index(drop = True, inplace = True)
    frame.reset_index(drop = True, inplace = True)
    frame = pd.concat([frame, df], axis = 1)
    all_comments = pd.concat([all_comments, frame])

all_comments.to_csv(config['output_doc'], index = False)
knowledgerep/Assignment1/W4_Lab_Bhattacharjee_Rajbir_R00195734.py | bhattacharjee/aima-python | 0 | 12766304 | #!/usr/bin/env python3
import sys
import os, inspect

# Make the repository root (the parent of this script's directory)
# importable so the AIMA `logic` module can be found, then import it.
try:
    currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    parentdir = os.path.dirname(currentdir)
    sys.path.insert(0,parentdir)
    from logic import *
except:  # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
    print("Could not import")
    sys.exit(1)
def question_b():
    """Week-4 lab answers: evaluate a series of propositional entailment,
    equivalence and satisfiability claims using the AIMA ``logic`` module.

    Fix relative to the original: the equivalence check printed as
    (C ∨ (¬A ∧ ¬B)) ≡ ((A ⇒ C) ∧ (B ⇒ C)) actually tested the formula
    ``C | (~A | ~B)`` (an OR where the stated formula has an AND), making
    the check report False; it now tests ``C | (~A & ~B)`` as stated.
    """
    entails_table = [
        ["False", "True"],
        ["True", "False"],
        ["A & B", "(A ==> B) & (B ==> A)"],
        ["(A ==> B) & (B ==> A)", "A | B"],
        ["(A ==> B) & (B <== A)", "~A | B"],
        ["(A & B) ==> C", "(A ==> C) | (B ==> C)"],
    ]
    for i in entails_table:
        print(f"Checking if {i[0]} entails {i[1]}")
        try:
            print(tt_entails(expr(i[0]), expr(i[1])))
        except Exception:
            # Some sentences only parse for tt_entails after CNF conversion.
            print(tt_entails(to_cnf(expr(i[0])), to_cnf(expr(i[1]))))
    print("Checking for (C ∨ (¬A ∧ ¬B)) ≡ ((A ⇒ C) ∧ (B ⇒ C))")
    # Logical equivalence = entailment in both directions.
    b1 = tt_entails(expr("C | (~A & ~B)"), expr("(A ==> C) & (B ==> C)"))
    b2 = tt_entails(expr("(A ==> C) & (B ==> C)"), expr("C | (~A & ~B)"))
    print(b1 and b2)
    entails_table = [
        ["(A | B) & (~C | ~D | E)", "(A | B)"],
        ["(A | B) & (~C | ~D | E)", "(A | B) & (~D | E)"],
    ]
    for i in entails_table:
        print(f"Checking if {i[0]} entails {i[1]}")
        try:
            print(tt_entails(expr(i[0]), expr(i[1])))
        except Exception:
            print(tt_entails(to_cnf(expr(i[0])), to_cnf(expr(i[1]))))
    print("Checking satisfiability of (A | B) & (~(A ==> B))")
    print(dpll_satisfiable(expr("(A | B) & (~(A ==> B))")))
    print("checking satisfiability of ((A ==> B) & (A <== B)) & (~A | B)")
    print(dpll_satisfiable(expr("((A ==> B) & (A <== B)) & (~A | B)")))
    n1 = dpll_satisfiable(expr("(((A ==> B) & (A <== B)) ==> C) & (((A ==> B) & (A <== B)) <== C)"))
    n2 = dpll_satisfiable(expr("(A ==> B) & (A <== B)"))
    # NOTE(review): dpll_satisfiable returns a single model (a dict of symbol
    # assignments), so len() counts *symbols*, not models -- confirm this
    # comparison supports the printed claim.
    if (len(n1) == (len(n2) * 2)):
        print("Both of them have the same number of models")
    print("---->", len(n1), f"\n{n1}", len(n2), f"\n{n2}")


question_b()
| 2.71875 | 3 |
sympy/utilities/tests/test_source.py | JMSS-Unknown/sympy | 8 | 12766305 | <gh_stars>1-10
from sympy.utilities.source import get_mod_func, get_class, source
from sympy.utilities.pytest import raises
from sympy import point
from sympy.utilities.exceptions import SymPyDeprecationWarning
def test_source():
    """source() is deprecated and must raise SymPyDeprecationWarning."""
    with raises(SymPyDeprecationWarning):
        source(point)
def test_get_mod_func():
    """get_mod_func splits a dotted path into (module path, attribute name)."""
    assert get_mod_func(
        'sympy.core.basic.Basic') == ('sympy.core.basic', 'Basic')
def test_get_class():
    """get_class imports and returns the class named by a dotted path."""
    _basic = get_class('sympy.core.basic.Basic')
    assert _basic.__name__ == 'Basic'
| 2.015625 | 2 |
ov2640.py | FunPythonEC/OV2640_uPy | 6 | 12766306 | from ov2640_constants import *
# The lores/hires constants scripts hold the resolution settings.  They are
# wrapped in try/except so that a missing module does not abort the import;
# however, camera initialisation will fail later if the resolution module
# selected for the camera setup was not actually loaded.
try:
    from ov2640_lores_constants import *
except Exception as e:
    print(e)
try:
    from ov2640_hires_constants import *
except Exception as e:
    print(e)
try:
    from ov2640_config import *
except Exception as e:
    print(e)
import machine
import time
import ubinascii
import uos
import gc
class ov2640(object):
    """Driver for an OV2640 camera module behind an ArduChip (MicroPython).

    The sensor is configured over I2C; captured JPEG data is read out of
    the ArduChip FIFO over SPI.
    """

    def __init__(self, sclpin=22, sdapin=21, cspin=15, sckpin=14, mosipin=13, misopin=12, resolution=OV2640_320x240_JPEG, imagedecode=OV2640_YUV422):
        """Initialise the buses, reset the sensor and load the JPEG config.

        :param sclpin: I2C clock pin (sensor control channel)
        :param sdapin: I2C data pin
        :param cspin: SPI chip-select pin (any free GPIO)
        :param sckpin: SPI clock pin
        :param mosipin: SPI MOSI pin
        :param misopin: SPI MISO pin
        :param resolution: register set selecting the capture resolution
        :param imagedecode: register set selecting the pixel format
        """
        gc.enable()
        # I2C pins
        self.sclpin = sclpin
        self.sdapin = sdapin
        # SPI pins
        self.sckpin = sckpin
        self.mosipin = mosipin
        self.misopin = misopin
        self.cspin = cspin
        # Standby-state flag.  BUGFIX: renamed from ``self.standby`` which
        # shadowed the standby() method and made it uncallable on instances.
        self.in_standby = False
        # Bus initialisation.  BUGFIX: the I2C bus previously hard-coded
        # pins 22/21 instead of honouring the sclpin/sdapin parameters.
        self.hspi = machine.SPI(1, baudrate=80000000, polarity=0, phase=0, sck=machine.Pin(self.sckpin), mosi=machine.Pin(self.mosipin), miso=machine.Pin(self.misopin))
        self.i2c = machine.I2C(scl=machine.Pin(self.sclpin), sda=machine.Pin(self.sdapin), freq=1000000)
        self.hspi.init(baudrate=2000000)
        # Chip-select for the SPI channel; note this can be any free GPIO.
        self.cspin = machine.Pin(self.cspin, machine.Pin.OUT)
        self.cspin.value(1)

        # Camera detection.
        addrs = self.i2c.scan()
        print('ov2640_init: devices detected on on i2c:')
        for a in addrs:
            print('0x%x' % a)

        time.sleep(1)
        self.i2c.writeto_mem(SENSORADDR, 0xff, b'\x01')
        # initiate system reset
        self.i2c.writeto_mem(SENSORADDR, 0x12, b'\x80')
        # let it come up
        time.sleep_ms(100)

        # jpg init registers
        cam_write_register_set(self.i2c, SENSORADDR, OV2640_JPEG_INIT)
        cam_write_register_set(self.i2c, SENSORADDR, imagedecode)
        cam_write_register_set(self.i2c, SENSORADDR, OV2640_JPEG)
        self.i2c.writeto_mem(SENSORADDR, 0xff, b'\x01')
        self.i2c.writeto_mem(SENSORADDR, 0x15, b'\x00')
        # BUGFIX: apply the requested resolution register set instead of
        # always forcing OV2640_1600x1200_JPEG (the parameter was ignored).
        cam_write_register_set(self.i2c, SENSORADDR, resolution)

        # SPI register read/write sanity check: write 0x55, read it back.
        cam_spi_write(b'\x00', b'\x55', self.hspi, self.cspin)
        res = cam_spi_read(b'\x00', self.hspi, self.cspin)
        print(res)
        print("ov2640 init: register test return bytes %s" % ubinascii.hexlify(res))
        if (res == b'\x55'):
            print("ov2640_init: register test successful")
        else:
            print("ov2640_init: register test failed!")

        time.sleep_us(10)
        self.i2c.writeto_mem(SENSORADDR, 0xff, b'\x01')
        # check the camera type (PID/VER registers 0x0a/0x0b)
        time.sleep_us(50)
        parta = self.i2c.readfrom_mem(SENSORADDR, 0x0a, 1)
        time.sleep_us(50)
        partb = self.i2c.readfrom_mem(SENSORADDR, 0x0b, 1)
        if ((parta != b'\x26') or (partb != b'\x42')):
            print("ov2640_init: device type does not appear to be ov2640, bytes: %s/%s" % \
                (ubinascii.hexlify(parta), ubinascii.hexlify(partb)))
        else:
            print("ov2640_init: device type looks correct, bytes: %s/%s" % \
                (ubinascii.hexlify(parta), ubinascii.hexlify(partb)))
        time.sleep_us(50)

    def capture_to_file(self, fn, overwrite):
        """Trigger a capture and stream the JPEG from the FIFO into file fn.

        :param fn: target file name
        :param overwrite: if True, delete an existing file of that name first
        :return: number of bytes read from the FIFO
        """
        # bit 0 - clear FIFO write done flag
        cam_spi_write(b'\x04', b'\x01', self.hspi, self.cspin)
        # bit 1 - start capture then read status
        cam_spi_write(b'\x04', b'\x02', self.hspi, self.cspin)
        time.sleep_ms(10)
        # read status
        res = cam_spi_read(b'\x41', self.hspi, self.cspin)
        cnt = 0
        # poll the status register until the capture-done bit (0x08) is set
        while True:
            res = cam_spi_read(b'\x41', self.hspi, self.cspin)
            mask = b'\x08'
            if (res[0] & mask[0]):
                break
            time.sleep_ms(10)
            cnt += 1
        # read the fifo size (24 bit, spread over three registers)
        b1 = cam_spi_read(b'\x44', self.hspi, self.cspin)
        b2 = cam_spi_read(b'\x43', self.hspi, self.cspin)
        b3 = cam_spi_read(b'\x42', self.hspi, self.cspin)
        val = b1[0] << 16 | b2[0] << 8 | b3[0]
        print("ov2640_capture: %d bytes in fifo" % val)
        gc.collect()

        bytebuf = [0, 0]
        picbuf = [b'\x00'] * PICBUFSIZE
        l = 0
        bp = 0
        if (overwrite == True):
            try:
                uos.remove(fn)
            except OSError:
                pass
        # read single bytes until the JPEG end-of-image marker 0xff 0xd9
        # has been seen (bytebuf[1] holds the previous byte)
        while ((bytebuf[0] != b'\xd9') or (bytebuf[1] != b'\xff')):
            bytebuf[1] = bytebuf[0]
            if (bp > (len(picbuf) - 1)):
                # buffer full: flush it to the file
                appendbuf(fn, picbuf, bp)
                bp = 0
            bytebuf[0] = cam_spi_read(b'\x3d', self.hspi, self.cspin)
            l += 1
            picbuf[bp] = bytebuf[0]
            bp += 1
        if (bp > 0):
            # flush the remaining partial buffer
            appendbuf(fn, picbuf, bp)
        print("read %d bytes from fifo, camera said %d were available" % (l, val))
        return (l)

    def set_mode_config(self, mode):
        """Apply a register set, e.g. to change resolution or pixel format."""
        cam_write_register_set(self.i2c, SENSORADDR, mode)

    def standby(self):
        """Put the sensor into low-power standby mode."""
        # register set select
        self.i2c.writeto_mem(SENSORADDR, 0xff, b'\x01')
        # standby mode
        self.i2c.writeto_mem(SENSORADDR, 0x09, b'\x10')
        self.in_standby = True

    def wake(self):
        """Wake the sensor up from standby mode."""
        # register set select
        self.i2c.writeto_mem(SENSORADDR, 0xff, b'\x01')
        # leave standby mode
        self.i2c.writeto_mem(SENSORADDR, 0x09, b'\x00')
        self.in_standby = False
def cam_write_register_set(i, addr, set):
    """Write a sequence of (register, value) pairs to a sensor over I2C.

    The sentinel pair (0xff, bytes 0xff) terminates the register set.
    BUGFIX: writes now go to the ``addr`` parameter instead of the
    hard-coded SENSORADDR global (all in-tree callers already pass
    SENSORADDR, so their behaviour is unchanged).

    :param i: I2C bus object providing writeto_mem()
    :param addr: I2C address of the sensor
    :param set: iterable of (register, value-bytes) pairs
    """
    for el in set:
        raddr = el[0]
        val = el[1]
        if (raddr == 0xff and val == b'\xff'):
            return
        i.writeto_mem(addr, raddr, val)
def cam_spi_write(address, value, hspi, cspin):
    """Write one byte to an ArduChip register over SPI.

    The write flag (0x80) is OR-ed into the register address; chip-select
    is asserted (low) for the duration of the transfer.
    """
    cspin.value(0)
    payload = bytes([address[0] | 0x80, value[0]])
    hspi.write(payload)
    cspin.value(1)
def appendbuf(fn, picbuf, howmany):
    """Append the first ``howmany`` single-byte entries of picbuf to file fn.

    Write errors are reported on stdout instead of being raised, since the
    caller (the FIFO read loop) cannot recover anyway.
    """
    try:
        with open(fn, 'ab') as fh:
            for chunk in picbuf[:howmany]:
                fh.write(bytes([chunk[0]]))
    except OSError:
        print("error writing file")
    print("write %d bytes from buffer" % howmany)
def cam_spi_read(address, hspi, cspin):
    """Read one byte from an ArduChip register over SPI.

    The top bit of the address is cleared (read flag); chip-select is
    asserted (low) for the duration of the transfer.

    :return: the byte read, as a length-1 bytes object
    """
    cspin.value(0)
    hspi.write(bytes([address[0] & 0x7f]))
    data = hspi.read(1)
    cspin.value(1)
    return data
| 2.109375 | 2 |
src/natcap/ui/execution.py | phargogh/natcap.ui | 0 | 12766307 | import threading
import os
import logging
import pprint
import traceback
import tempfile
from qtpy import QtCore
LOGGER = logging.getLogger(__name__)
class Executor(QtCore.QObject, threading.Thread):
"""Executor represents a thread of control that runs a python function with
a single input. Once created with the proper inputs, threading.Thread has
the following attributes:
self.module - the loaded module object provided to __init__()
self.args - the argument to the target function. Usually a dict.
self.func_name - the function name that will be called.
self.log_manager - the LogManager instance managing logs for this script
self.failed - defaults to False. Indicates whether the thread raised an
exception while running.
self.execption - defaults to None. If not None, points to the exception
raised while running the thread.
The Executor.run() function is an overridden function from threading.Thread
and is started in the same manner by calling Executor.start(). The run()
function is extremely simple by design: Print the arguments to the logfile
and run the specified function. If an execption is raised, it is printed
and saved locally for retrieval later on.
In keeping with convention, a single Executor thread instance is only
designed to be run once. To run the same function again, it is best to
create a new Executor instance and run that."""
finished = QtCore.Signal()
def __init__(self, target, args, kwargs, logfile, tempdir=None):
QtCore.QObject.__init__(self)
threading.Thread.__init__(self)
self.target = target
self.tempdir = tempdir
if not args:
args = ()
self.args = args
if not kwargs:
kwargs = {}
self.kwargs = kwargs
if logfile is None:
logfile = os.path.join(tempfile.mkdtemp(), 'logfile.txt')
self.logfile = logfile
self.failed = False
self.exception = None
self.traceback = None
def run(self):
"""Run the python script provided by the user with the arguments
specified. This function also prints the arguments to the logfile
handler. If an exception is raised in either the loading or execution
of the module or function, a traceback is printed and the exception is
saved."""
try:
self.target(*self.args, **self.kwargs)
except Exception as error:
# We deliberately want to catch all possible exceptions.
LOGGER.exception(error)
self.failed = True
self.exception = error
self.traceback = traceback.format_exc()
finally:
LOGGER.info('Execution finished')
self.finished.emit()
| 3.34375 | 3 |
tests/conftest.py | mondeja/mkdocs-mdpo-plugin | 3 | 12766308 | """Configuration for mkdocs_mdpo_plugin tests."""
import os
import sys
from tempfile import TemporaryDirectory
import polib
import pytest
import yaml
from mkdocs import config
from mkdocs.commands.build import build
from mkdocs_mdpo_plugin.plugin import MdpoPlugin
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
def _mkdocs_build(
input_files_contents,
translations,
plugin_config,
additional_config,
expected_output_files,
callback_after_first_build=None,
insert_plugin_config_at_position=-1,
interrupt_after_first_build=False,
):
with TemporaryDirectory() as site_dir, TemporaryDirectory() as docs_dir, \
TemporaryDirectory() as config_dir:
# build input files
for input_file_name, content in input_files_contents.items():
filename = os.path.join(docs_dir, input_file_name)
os.makedirs(
os.path.abspath(os.path.dirname(filename)),
exist_ok=True,
)
with open(filename, 'w') as f:
f.write(content)
mdpo_config = {}
if plugin_config:
for mdpo_plugin_config_field, _ in MdpoPlugin.config_scheme:
if mdpo_plugin_config_field in plugin_config:
mdpo_config[mdpo_plugin_config_field] = plugin_config.get(
mdpo_plugin_config_field,
)
mkdocs_config = {
'site_name': 'My site',
'site_url': 'https://foo.bar',
'docs_dir': docs_dir,
'site_dir': site_dir,
'plugins': [],
}
if additional_config:
mkdocs_config.update(additional_config)
if insert_plugin_config_at_position == -1:
mkdocs_config['plugins'].append({'mdpo': mdpo_config})
else:
mkdocs_config['plugins'].insert(
insert_plugin_config_at_position,
{'mdpo': mdpo_config},
)
config_filename = os.path.join(config_dir, 'mkdocs.yml')
with open(config_filename, 'w') as f:
yaml.dump(mkdocs_config, f)
# first build, load content to translations (Markdown -> PO files)
try:
build(config.load_config(config_filename))
except Exception:
os.remove(config_filename)
raise
if callback_after_first_build:
callback_after_first_build(locals())
if interrupt_after_first_build:
os.remove(config_filename)
return
# translate PO files
for po_filename, translation_messages in translations.items():
po_filename = os.path.join(docs_dir, os.path.normpath(po_filename))
assert os.path.isfile(po_filename)
po = polib.pofile(po_filename)
for msgid_or_msgctxt, msgstr in translation_messages.items():
if isinstance(msgstr, dict):
# case when msgctxt is passed as key
# and msgid-msgstr as value in a dict
msgid = list(msgstr.keys())[0]
msgstr = msgstr[msgid]
msgctxt = msgid_or_msgctxt
else:
msgid = msgid_or_msgctxt
msgctxt = None
_msgid_in_pofile = False
for entry in po:
if entry.msgid == msgid:
_msgid_in_pofile = True
break
assert _msgid_in_pofile, (
f"'{msgid}' not found in pofile '{po_filename}'"
)
for entry in po:
if entry.msgid == msgid:
entry.msgstr = msgstr
if msgctxt:
entry.msgctxt = msgctxt
break
for entry in po:
# 'Home' is the title given to the page by the default
# Mkdocs theme
if entry.msgid == 'Home':
continue
assert entry.msgstr, (
f"Found '{entry.msgid}' not translated in pofile"
)
po.save(po_filename)
# second build, dump translations in content (PO files -> Markdown)
try:
build(config.load_config(config_filename))
except Exception:
os.remove(config_filename)
raise
# assert that files have been translated
for filename, expected_lines in expected_output_files.items():
if not expected_lines:
raise ValueError(
'Expected file defined without output lines',
)
filename = os.path.join(site_dir, os.path.normpath(filename))
with open(filename) as f:
content = f.read()
for expected_line in expected_lines:
assert expected_line in content
os.remove(config_filename)
@pytest.fixture
def mkdocs_build():
return _mkdocs_build
| 2.140625 | 2 |
src/app/tasks/__init__.py | serious-notreally/cappa | 9 | 12766309 | default_app_config = "app.tasks.apps.TasksAppConfig"
| 1.101563 | 1 |
src/lesson_networking/socket_ipv6_address_packing.py | jasonwee/asus-rt-n14uhp-mrtg | 3 | 12766310 | import binascii
import socket
import struct
import sys
# Demonstrate IPv6 address packing: text -> binary -> text round trip.
string_address = 'fdf8:f53e:61e4::18'
packed = socket.inet_pton(socket.AF_INET6, string_address)

for label, value in (
        ('Original:', string_address),
        ('Packed :', binascii.hexlify(packed)),
        ('Unpacked:', socket.inet_ntop(socket.AF_INET6, packed))):
    print(label, value)
| 2.453125 | 2 |
nse_opinf_poddmd/load_data.py | mpimd-csc/nse-opinf-poddmd | 0 | 12766311 | <filename>nse_opinf_poddmd/load_data.py
"""Loading examples matrices and snaphots data"""
__all__ = [
"get_matrices",
"load_snapshots"
]
import scipy.io
import numpy as np
import json
from nse_opinf_poddmd.cwdc_tdp_pout_vout import comp_snapshots
def savedmatsstr(problem, NV):
    """Return the path of the stored .mat file with the system matrices.

    NOTE(review): the Re placeholder is always filled with the literal 1
    (the matrices appear to be stored Re-independent, with the 1/Re scaling
    applied at load time) -- confirm against the data files.
    """
    return 'data/{0}__mats_NV{1}_Re1.mat'.format(problem, NV)
## Getting matrcies function
################################
def get_matrices(problem, NV):
    """Load the operators of the semi-discrete Navier-Stokes model.

    The returned matrices define the differential-algebraic system

        M*dx = A11*x + A12*p + H*kron(x, x) + B1
           0 = A12.T*x + B2

    with velocity and pressure output operators Cv and Cp.

    :param problem: 'cylinderwake' (Re=60) or 'drivencavity' (Re=500)
    :param NV: number of velocity degrees of freedom
    :return: (M, A11, A12, H, B1, B2, Cv, Cp)
    """
    if problem == 'cylinderwake':
        Re = 60
    elif problem == 'drivencavity':
        Re = 500
    # load the raw matrices from the .mat file
    mats = scipy.io.loadmat(savedmatsstr(problem, NV))
    # mass matrix
    M = mats['M']
    # linear velocity operator (diffusion scaled by 1/Re plus linearisations)
    A11 = -(1./Re*mats['A'] + mats['L1'] + mats['L2'])
    # discrete gradient (transpose of the divergence operator J)
    A12 = mats['J'].T
    # quadratic (convection) operator
    H = -mats['H']
    # momentum-equation right-hand side
    B1 = mats['fv'] + 1./Re*mats['fv_diff'] + mats['fv_conv']
    # continuity-equation right-hand side
    B2 = mats['fp'] + mats['fp_div']
    # output operators for pressure and velocity
    Cp = mats['Cp']
    Cv = mats['Cv']
    return M, A11, A12, H, B1, B2, Cv, Cp
## Loading snapshots function
################################
def load_snapshots(N=1, NV=None, problem='drivencavity',
                   Re=500, tE=None, Nts=None, nsnapshots=None,
                   verbose=False, returngradp=False, odesolve=False):
    """Load (or compute, then load) snapshot data for the given problem.

    Velocity, pressure, momentum-rhs, acceleration and continuity-rhs
    snapshots are read from json files under ``results/``.  If they do not
    exist yet, ``comp_snapshots`` is invoked first to generate them.

    :param N: problem-size index, mapped to the number of velocity dofs
    :param NV: number of velocity dofs; overrides ``N`` if provided
    :param problem: 'drivencavity' or 'cylinderwake'
    :param Re: Reynolds number of the stored simulation
    :param tE: end time of the simulation
    :param Nts: number of time steps (replaced by 'dna' when ``odesolve``)
    :param nsnapshots: number of stored snapshots
    :param verbose: if True, print the norms of every loaded snapshot
    :param returngradp: if True, additionally return the grad-p snapshots
    :param odesolve: if True, load the odeint-generated data set
    :return: ``(V, Vd, MVd, P, T)`` or ``(V, Vd, MVd, P, JTP, T)`` if
        ``returngradp``; the matrix columns are snapshots and ``T`` is the
        list of snapshot times
    """
    if odesolve:
        solverstr = '_odeint_'
        # the odeint data files carry 'dna' in place of a step count
        Nts = 'dna'
    else:
        solverstr = ''
    # hard coded paths and dictionary for data
    if problem == 'cylinderwake':
        NVdict = {1: 5812, 2: 9356, 3: 19468}
    elif problem == 'drivencavity':
        NVdict = {1: 722, 2: 3042, 3: 6962}
    if NV is None:
        NV = NVdict[N]
    datastr = 'snapshots_' + problem + solverstr + \
        '_Re{1}_NV{0}_tE{2}_Nts{3}_nsnaps{4}.json'.format(NV, Re, tE,
                                                          Nts, nsnapshots)
    savesnapshtstrv = 'results/vel_' + datastr
    savesnapshtstrp = 'results/prs_' + datastr
    savesnapshtstrmomrhs = 'results/momrhs_' + datastr
    savesnapshtstrcontirhs = 'results/contirhs_' + datastr
    savesnapshtstrminvmomrhs = 'results/minv_momrhs_' + datastr
    savesnapshtstrgradp = 'results/gradprs_' + datastr

    def _read_data_files():
        # read all six snapshot dictionaries; raises IOError if any is
        # missing, which triggers the recomputation below
        with open(savesnapshtstrv) as vdatafile:
            veldict = json.load(vdatafile)
        with open(savesnapshtstrp) as pdatafile:
            prsdict = json.load(pdatafile)
        with open(savesnapshtstrgradp) as jtpdatafile:
            jtprsdict = json.load(jtpdatafile)
        with open(savesnapshtstrmomrhs) as momrhsdatafile:
            momrhsdict = json.load(momrhsdatafile)
        with open(savesnapshtstrminvmomrhs) as minvmomrhsdatafile:
            minvmomrhsdict = json.load(minvmomrhsdatafile)
        with open(savesnapshtstrcontirhs) as contirhsdatafile:
            contirhsdict = json.load(contirhsdatafile)
        return (veldict, prsdict, jtprsdict, momrhsdict,
                minvmomrhsdict, contirhsdict)

    try:
        (veldict, prsdict, jtprsdict, momrhsdict,
         minvmomrhsdict, contirhsdict) = _read_data_files()
    except IOError:
        print('no data -- gonna compute it')
        comp_snapshots(N=N, problem=problem,
                       Re=Re, tE=tE, Nts=Nts, nsnapshots=nsnapshots,
                       odesolve=odesolve)
        (veldict, prsdict, jtprsdict, momrhsdict,
         minvmomrhsdict, contirhsdict) = _read_data_files()

    # Assemble the snapshot matrices column by column
    MVd = np.empty((NV, 0), float)   # rhs of the momentum equation
    Vd = np.empty((NV, 0), float)    # accelerations M^(-1)*rhs
    JTP = np.empty((NV, 0), float)   # pressure gradients
    Plist = []
    Vlist = []

    times = veldict.keys()
    for time in times:
        # Collecting velocity snapshots in a matrix
        varray = np.array(veldict[time])
        NV = varray.size
        Vlist.append(varray.reshape((NV, 1)))
        # Collecting pressure snapshots in a matrix
        parray = np.array(prsdict[time])
        NP = parray.size
        Plist.append(parray.reshape((NP, 1)))
        try:
            # Collecting grad-pressure snapshots in a matrix
            jtparray = np.array(jtprsdict[time])
            JTP = np.append(JTP, jtparray, axis=1)
            # Collecting momentum-rhs snapshots in a matrix
            momrhs = np.array(momrhsdict[time])
            MVd = np.append(MVd, momrhs, axis=1)
            # Collecting acceleration snapshots in a matrix
            minvmomrhs = np.array([minvmomrhsdict[time]]).T
            Vd = np.append(Vd, minvmomrhs, axis=1)
            # Continuity equation
            contirhs = np.array(contirhsdict[time])
            if verbose:
                # BUGFIX: use the builtin float -- the np.float alias was
                # deprecated in numpy 1.20 and removed in 1.24
                t = float(time)
                print('time: {0:.4f} -- |v|: {1:.2e}'.
                      format(t, np.linalg.norm(varray)))
                print('time: {0:.4f} -- |p|: {1:.2e}'.
                      format(t, np.linalg.norm(parray)))
                print('time: {0:.4f} -- |rhs(momentum eq)|: {1:.2e}'.
                      format(t, np.linalg.norm(momrhs)))
                print('time: {0:.4f} -- |M^(-1)*rhs(momentum eq)|: {1:.2e}'.
                      format(t, np.linalg.norm(minvmomrhs)))
                print('time: {0:.4f} -- |rhs(continty eq)|: {1:.2e}'.
                      format(t, np.linalg.norm(contirhs)))
        except KeyError:
            # some data sets do not provide the derivative snapshots
            pass

    T = list(map(float, times))
    P = np.hstack(Plist)
    V = np.hstack(Vlist)
    if returngradp:
        return V, Vd, MVd, P, JTP, T
    else:
        return V, Vd, MVd, P, T
if __name__ == '__main__':
    # Smoke test: load both snapshot sets (odeint vs. implicit solver) for
    # one problem and plot the difference of the velocity outputs.
    problem = 'drivencavity'
    # problem = 'cylinderwake'
    if problem == 'drivencavity':
        Nprob = 2
        NVdict = {1: 722, 2: 3042, 3: 6962}
        NV = NVdict[Nprob]
        Re = 500
        tE = 3  # 4.
        Nts = 2**9
        nsnapshots = 2**9
    if problem == 'cylinderwake':
        Nprob = 1
        NVdict = {1: 5812, 2: 9356, 3: 19468}
        NV = NVdict[Nprob]
        Re = 60
        tE = 2  # 0.4
        Nts = 2**10
        nsnapshots = 2**8
    # getting system matrices
    M, A11, A12, H, B1, B2, Cv, Cp = get_matrices(problem, NV)
    # loading snapshots (odeint-based and time-stepping-based data sets)
    Vodeint, _, _, _z, Todeint =\
        load_snapshots(N=Nprob, problem=problem, Re=Re, tE=tE, Nts=Nts,
                       nsnapshots=nsnapshots, odesolve=True)
    V, _, _, _, T =\
        load_snapshots(N=Nprob, problem=problem, Re=Re, tE=tE, Nts=Nts,
                       nsnapshots=nsnapshots, odesolve=False)
    import matplotlib.pyplot as plt
    plt.figure(1)
    # plot the output difference; if the snapshot counts differ, fall back
    # to plotting both trajectories separately
    try:
        plt.plot(T, (Cv*V).T-(Cv*Vodeint).T)
    except ValueError:
        plt.plot(T, (Cv*V).T)
        plt.plot(Todeint, (Cv*Vodeint).T)
    plt.figure(2)
    plt.plot(Todeint, (Cv*Vodeint).T)
    # plt.plot(Todeint, (Cv*Vodeint).T)
    plt.show()
| 2.375 | 2 |
IT Workshop on Python Lab Scripts/4c-ii Selection sort.py | AswinBarath/IT-Workshop-on-Python | 0 | 12766312 | array = list(map(int, input('Enter the array of integers to be sorted(separated by spaces):').split()))
for i in range(len(array)):
min_idx = i
for j in range(i + 1, len(array)):
if array[min_idx] > array[j]:
min_idx = j
array[i], array[min_idx] = array[min_idx], array[i]
print("Sorted array is:")
print(*array)
| 4 | 4 |
hostmanager/tomato/migrations/0006_users3.py | dswd/ToMaTo | 2 | 12766313 | <reponame>dswd/ToMaTo<gh_stars>1-10
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration (auto-generated): scope Network/Template
    uniqueness and Element/Connection ownership to the User model."""

    def forwards(self, orm):
        """Apply the migration: replace string owners with User FKs and
        retarget the uniqueness constraints accordingly."""
        # Removing unique constraint on 'Network', fields ['bridge']
        db.delete_unique('tomato_network', ['bridge'])

        # Changing field 'Network.owner'
        db.alter_column('tomato_network', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tomato.User']))
        # Adding unique constraint on 'Network', fields ['owner', 'bridge']
        db.create_unique('tomato_network', ['owner_id', 'bridge'])

        # Deleting field 'Element.owner_str'
        db.delete_column('tomato_element', 'owner_str')

        # Changing field 'Element.owner'
        db.alter_column('tomato_element', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tomato.User']))
        # Deleting field 'Connection.owner_str'
        db.delete_column('tomato_connection', 'owner_str')

        # Changing field 'Connection.owner'
        db.alter_column('tomato_connection', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tomato.User']))
        # Changing field 'Template.owner'
        db.alter_column('tomato_template', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tomato.User']))
        # Adding unique constraint on 'Template', fields ['owner', 'tech', 'name']
        db.create_unique('tomato_template', ['owner_id', 'tech', 'name'])

    def backwards(self, orm):
        """Revert the migration; the dropped owner_str columns cannot be
        restored, so reversal raises for those."""
        # Removing unique constraint on 'Template', fields ['owner', 'tech', 'name']
        db.delete_unique('tomato_template', ['owner_id', 'tech', 'name'])

        # Removing unique constraint on 'Network', fields ['owner', 'bridge']
        db.delete_unique('tomato_network', ['owner_id', 'bridge'])

        # Changing field 'Network.owner'
        db.alter_column('tomato_network', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(null=False, to=orm['tomato.User']))
        # Adding unique constraint on 'Network', fields ['bridge']
        db.create_unique('tomato_network', ['bridge'])

        # User chose to not deal with backwards NULL issues for 'Element.owner_str'
        raise RuntimeError("Cannot reverse this migration. 'Element.owner_str' and its values cannot be restored.")
        # Changing field 'Element.owner'
        db.alter_column('tomato_element', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(null=False, to=orm['tomato.User']))
        # User chose to not deal with backwards NULL issues for 'Connection.owner_str'
        raise RuntimeError("Cannot reverse this migration. 'Connection.owner_str' and its values cannot be restored.")
        # Changing field 'Connection.owner'
        db.alter_column('tomato_connection', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(null=False, to=orm['tomato.User']))
        # Changing field 'Template.owner'
        db.alter_column('tomato_template', 'owner_id', self.gf('django.db.models.fields.related.ForeignKey')(null=False, to=orm['tomato.User']))

    # Frozen ORM state (auto-generated by South); do not edit by hand.
    models = {
        'tomato.bridge': {
            'Meta': {'object_name': 'Bridge', '_ormbases': ['tomato.Connection']},
            'connection_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Connection']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.connection': {
            'Meta': {'object_name': 'Connection'},
            'attrs': ('tomato.lib.db.JSONField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connections'", 'to': "orm['tomato.User']"}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'usageStatistics': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'connection'", 'unique': 'True', 'null': 'True', 'to': "orm['tomato.UsageStatistics']"})
        },
        'tomato.element': {
            'Meta': {'object_name': 'Element'},
            'attrs': ('tomato.lib.db.JSONField', [], {}),
            'connection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'null': 'True', 'to': "orm['tomato.Connection']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['tomato.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['tomato.Element']"}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'timeout': ('django.db.models.fields.FloatField', [], {}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'usageStatistics': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'element'", 'unique': 'True', 'null': 'True', 'to': "orm['tomato.UsageStatistics']"})
        },
        'tomato.external_network': {
            'Meta': {'object_name': 'External_Network', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'}),
            'network': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'null': 'True', 'to': "orm['tomato.Network']"})
        },
        'tomato.fixed_bridge': {
            'Meta': {'object_name': 'Fixed_Bridge', '_ormbases': ['tomato.Connection']},
            'connection_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Connection']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.kvmqm': {
            'Meta': {'object_name': 'KVMQM', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tomato.Template']", 'null': 'True'})
        },
        'tomato.kvmqm_interface': {
            'Meta': {'object_name': 'KVMQM_Interface', 'db_table': "'tomato_kvm_interface'", '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.network': {
            'Meta': {'unique_together': "(('bridge', 'owner'),)", 'object_name': 'Network', '_ormbases': ['tomato.Resource']},
            'bridge': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'networks'", 'to': "orm['tomato.User']"}),
            'preference': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Resource']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.openvz': {
            'Meta': {'object_name': 'OpenVZ', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tomato.Template']", 'null': 'True'})
        },
        'tomato.openvz_interface': {
            'Meta': {'object_name': 'OpenVZ_Interface', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.repy': {
            'Meta': {'object_name': 'Repy', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tomato.Template']", 'null': 'True'})
        },
        'tomato.repy_interface': {
            'Meta': {'object_name': 'Repy_Interface', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.resource': {
            'Meta': {'object_name': 'Resource'},
            'attrs': ('tomato.lib.db.JSONField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'tomato.resourceinstance': {
            'Meta': {'unique_together': "(('num', 'type'),)", 'object_name': 'ResourceInstance'},
            'attrs': ('tomato.lib.db.JSONField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'num': ('django.db.models.fields.IntegerField', [], {}),
            'ownerConnection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tomato.Connection']", 'null': 'True'}),
            'ownerElement': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['tomato.Element']", 'null': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'tomato.template': {
            'Meta': {'unique_together': "(('tech', 'name', 'owner'),)", 'object_name': 'Template', '_ormbases': ['tomato.Resource']},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'templates'", 'to': "orm['tomato.User']"}),
            'preference': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'resource_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Resource']", 'unique': 'True', 'primary_key': 'True'}),
            'tech': ('django.db.models.fields.CharField', [], {'max_length': '20'})
        },
        'tomato.tinc': {
            'Meta': {'object_name': 'Tinc', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.udp_tunnel': {
            'Meta': {'object_name': 'UDP_Tunnel', '_ormbases': ['tomato.Element']},
            'element_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['tomato.Element']", 'unique': 'True', 'primary_key': 'True'})
        },
        'tomato.usagerecord': {
            'Meta': {'object_name': 'UsageRecord'},
            'begin': ('django.db.models.fields.FloatField', [], {}),
            'cputime': ('django.db.models.fields.FloatField', [], {}),
            'diskspace': ('django.db.models.fields.FloatField', [], {}),
            'end': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'measurements': ('django.db.models.fields.IntegerField', [], {}),
            'memory': ('django.db.models.fields.FloatField', [], {}),
            'statistics': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'records'", 'to': "orm['tomato.UsageStatistics']"}),
            'traffic': ('django.db.models.fields.FloatField', [], {}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'tomato.usagestatistics': {
            'Meta': {'object_name': 'UsageStatistics'},
            'attrs': ('tomato.lib.db.JSONField', [], {}),
            'begin': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'tomato.user': {
            'Meta': {'object_name': 'User'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
        }
    }

    complete_apps = ['tomato']
pyclesperanto_prototype/_tier0/_available_device_names.py | tlambert-forks/pyclesperanto_prototype | 64 | 12766314 | <reponame>tlambert-forks/pyclesperanto_prototype
from .._tier0._device import filter_devices
from typing import List
def available_device_names(dev_type: str = None, score_key=None) -> List[str]:
    """List the names of the available OpenCL devices.

    Parameters
    ----------
    dev_type : str
        'cpu', 'gpu', or None; None matches any device type
    score_key : callable
        optional scoring function, accepts a device and returns an int

    Returns
    -------
    list of str
        names of the matching OpenCL devices

    See Also
    --------
    filter_devices : returns the device objects instead of their names
    """
    matching = filter_devices(dev_type=dev_type, score_key=score_key)
    return [device.name for device in matching]
libs/losses.py | tony92151/pedestrian_generator | 0 | 12766315 | <reponame>tony92151/pedestrian_generator
from torch.nn.functional import mse_loss
import torch
def completion_network_loss(input, output, mask):
    """Mean-squared error between output and target, restricted to the mask."""
    masked_output = output * mask
    masked_target = input * mask
    return mse_loss(masked_output, masked_target)
def completion_network_loss_P(background, person, mask, output, lefttop, height = 128 , width = 64, devices = 'cuda:0'):
    """Weighted reconstruction loss for pedestrian inpainting.

    Compares the generated crop at each sample's ``lefttop`` location
    against the person crop (weight 0.7) and the full output against the
    background (weight 0.3).

    * background, person, mask, output:
        - shape: batchsize * 3 * 256 * 256
    * lefttop: per-sample (left, top) crop origins
    """
    batchsize = output.shape[0]
    output_crop = torch.zeros([batchsize, 3, height, width]).to(devices)
    person_crop = torch.zeros([batchsize, 3, height, width]).to(devices)
    # mask_crop is computed but not used by the current loss formula
    mask_crop = torch.zeros([batchsize, 3, height, width]).to(devices)
    for idx in range(batchsize):
        left, top = lefttop[idx][0], lefttop[idx][1]
        output_crop[idx] = output[idx, :, top:top + height, left:left + width]
        person_crop[idx] = person[idx, :, top:top + height, left:left + width]
        mask_crop[idx] = mask[idx, :, top:top + height, left:left + width]
    return 0.7 * mse_loss(output_crop, person_crop) + 0.3 * mse_loss(output, background)
| 2.359375 | 2 |
multiinput/models/custom_keras_model.py | NeilBotelho/ibm-fl | 2 | 12766316 | """
Licensed Materials - Property of IBM
Restricted Materials of IBM
20190891
© Copyright IBM Corp. 2020 All Rights Reserved.
"""
import logging
import keras
import time
import json
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.backend import set_session
from keras import backend as k
from keras.preprocessing.image import ImageDataGenerator
from keras_preprocessing.image.numpy_array_iterator import NumpyArrayIterator
from ibmfl.util import config
from ibmfl.model.fl_model import FLModel
from ibmfl.model.model_update import ModelUpdate
from ibmfl.exceptions import FLException, LocalTrainingException
import matplotlib.pyplot as plt
from pathlib import Path
logger = logging.getLogger(__name__)
class KerasFLModel(FLModel):
    """
    Wrapper class for importing keras and tensorflow.keras models.
    """

    def __init__(self, model_name, model_spec, keras_model=None):
        """
        Create a `KerasFLModel` instance from a Keras model.
        If keras_model is provided, it will use it; otherwise it will take
        the model_spec to create the model.
        Assumes the `model` passed as argument is compiled.

        :param model_name: String specifying the type of model e.g., Keras_CNN
        :type model_name: `str`
        :param model_spec: Specification of the keras_model
        :type model_spec: `dict`
        :param keras_model: Compiled keras model.
        :type keras_model: `keras.models.Model`
        """
        # One graph/session pair per model so that all subsequent
        # fit/predict/evaluate calls run against the same TF state.
        self.graph = tf.get_default_graph()
        self.sess = tf.Session()
        set_session(self.sess)

        if keras_model is None:
            if model_spec is None or (not isinstance(model_spec, dict)):
                raise ValueError('Initializing model requires '
                                 'a model specification or '
                                 'compiled keras model. '
                                 'None was provided')
            # Recreate the model from the provided specification.
            self.model = self.load_model_from_spec(model_spec)
        else:
            if not issubclass(type(keras_model), (keras.models.Model,
                                                  tf.keras.models.Model)):
                raise ValueError('Compiled keras model needs to be provided '
                                 '(keras.models/tensorflow.keras.models). '
                                 'Type provided' + str(type(keras_model)))
            self.model = keras_model

        self.model_type = model_name
        # Bug fix: model_spec may legitimately be None when a compiled
        # keras_model is supplied directly (see docstring); previously this
        # raised TypeError. Fall back to model_name in that case.
        self.model_name = model_spec['model_name'] if model_spec else model_name

        # True when the wrapped model is a standalone-keras model
        # (as opposed to tf.keras); some code paths differ between the two.
        self.is_keras = issubclass(type(self.model), keras.models.Model)

        # Default values for local training.
        self.batch_size = 30  # lower this if you run into memory errors
        self.epochs = 1
        self.steps_per_epoch = 100

    def fit_model(self, train_data, fit_params=None):
        """
        Fits current model with provided training data.

        :param train_data: Training data, a tuple given in the form \
        (x_train, y_train) or a datagenerator of of type `keras.utils.Sequence`, \
        `keras.preprocessing.image.ImageDataGenerator`
        :type train_data: `np.ndarray`
        :param fit_params: (optional) Dictionary with hyperparameters \
        that will be used to call Keras fit function.\
        Hyperparameter parameters should match keras expected values \
        e.g., `epochs`, which specifies the number of epochs to be run. \
        If no `epochs` or `batch_size` are provided, a default value \
        will be used.
        :type fit_params: `dict`
        :return: None
        """
        # Start from the instance defaults and override from fit_params.
        batch_size = self.batch_size
        epochs = self.epochs
        steps_per_epoch = self.steps_per_epoch

        if fit_params and ('hyperparams' in fit_params):
            hyperparams = fit_params['hyperparams']
            try:
                training_hp = hyperparams['local']['training']
                if 'batch_size' in training_hp:
                    batch_size = training_hp['batch_size']
                else:
                    logger.info('Using default hyperparameters: '
                                ' batch_size:' + str(self.batch_size))
                if 'epochs' in training_hp:
                    epochs = training_hp['epochs']
                else:
                    logger.info('Using default hyperparameters: '
                                ' epochs:' + str(self.epochs))
                if 'steps_per_epoch' in training_hp:
                    # Only relevant for generator-based training.
                    steps_per_epoch = training_hp.get('steps_per_epoch')
            except Exception as ex:
                logger.exception(str(ex))
                logger.warning('Hyperparams badly formed.')
                # Fall back to the defaults.
                logger.info('Using default hyperparameters: '
                            'epochs:' + str(self.epochs) +
                            ' batch_size:' + str(self.batch_size))

        try:
            # NOTE(review): generator inputs are not auto-detected here;
            # call fit_generator() directly for keras data generators.
            self.fit(train_data, batch_size=batch_size, epochs=epochs)
        except Exception as e:
            logger.exception(str(e))
            if epochs is None:
                logger.exception('epochs need to be provided')
            raise LocalTrainingException(
                'Error occurred while performing model.fit')

    def fit(self, train_data, batch_size, epochs):
        """
        Fits current model using model.fit with provided training data.

        :param train_data: Training data, a tuple \
        given in the form (x_train, y_train).
        :type train_data: `np.ndarray`
        :param batch_size: Number of samples per gradient update.
        :type batch_size: Integer
        :param epochs: Number of epochs to train the model.
        :type epochs: Integer
        :return: None
        """
        # Ensure the per-model artifact directory exists; it is used for
        # optional metric plots and saved models.
        artifact_root = Path(super().get_model_absolute_path(""))
        artifact_root.joinpath(f"{self.model_name}").mkdir(parents=True,
                                                           exist_ok=True)
        x = train_data[0]
        y = train_data[1]
        with self.graph.as_default():
            set_session(self.sess)
            # Bug fix: honour the batch_size argument (previously the
            # instance default self.batch_size was always used, silently
            # ignoring the caller's value).
            self.model.fit(x, y, batch_size=batch_size, epochs=epochs)

    def fit_generator(self, training_generator, batch_size, epochs, steps_per_epoch=None):
        """
        Fits current model using model.fit_generator with provided training
        data generator.

        :param training_generator: Training datagenerator of type \
        `keras.utils.Sequence` or `keras.preprocessing.image.ImageDataGenerator`
        :type training_generator: `ImageDataGenerator` or `keras.utils.Sequence`
        :param batch_size: Unused; the generator controls its own batch \
        size. Kept for interface compatibility.
        :type batch_size: Integer
        :param epochs: Number of epochs to train the model.
        :type epochs: Integer
        :param steps_per_epoch: Total number of steps (batches of samples) \
        to yield from `generator` before declaring one epoch. Optional \
        for `Sequence` data generators.
        :type steps_per_epoch: `int`
        :return: None
        """
        # NOTE(review): this raises when the generator IS a
        # NumpyArrayIterator, while evaluate_generator_model raises when it
        # is NOT one — one of the two conditions looks inverted relative to
        # the error message. Behaviour kept as-is pending confirmation.
        if type(training_generator) is NumpyArrayIterator and not steps_per_epoch:
            raise LocalTrainingException(
                "Variable steps_per_epoch cannot be None for generators not \
                of type keras.utils.Sequence!")
        with self.graph.as_default():
            set_session(self.sess)
            self.model.fit_generator(
                training_generator, steps_per_epoch=steps_per_epoch, epochs=epochs)

    def update_model(self, model_update):
        """
        Update keras model with provided model_update, where model_update
        should be generated according to `KerasFLModel.get_model_update()`.

        :param model_update: `ModelUpdate` object that contains the weights \
        that will be used to update the model.
        :type model_update: `ModelUpdate`
        :return: None
        """
        if isinstance(model_update, ModelUpdate):
            with self.graph.as_default():
                set_session(self.sess)
                w = model_update.get("weights")
                self.model.set_weights(w)
        else:
            raise LocalTrainingException('Provided model_update should be of '
                                         'type ModelUpdate. '
                                         'Instead they are:' +
                                         str(type(model_update)))

    def get_model_update(self):
        """
        Generates a `ModelUpdate` object that will be sent to other entities.

        :return: ModelUpdate
        :rtype: `ModelUpdate`
        """
        w = self.model.get_weights()
        return ModelUpdate(weights=w)

    def predict(self, x, batch_size=128, **kwargs):
        """
        Perform prediction for a batch of inputs. Note that for classification
        problems, it returns the resulting probabilities.

        :param x: Samples with shape as expected by the model.
        :type x: `np.ndarray`
        :param batch_size: Size of batches.
        :type batch_size: `int`
        :param kwargs: Dictionary of keras-specific arguments.
        :type kwargs: `dict`
        :return: Array of predictions
        :rtype: `np.ndarray`
        """
        with self.graph.as_default():
            set_session(self.sess)
            return self.model.predict(x, batch_size=batch_size, **kwargs)

    def evaluate(self, test_dataset, **kwargs):
        """
        Evaluates the model given testing data.

        :param test_dataset: Testing data, a tuple given in the form \
        (x_test, y_test) or a datagenerator of type `keras.utils.Sequence`, \
        `keras.preprocessing.image.ImageDataGenerator`
        :type test_dataset: `np.ndarray`
        :param kwargs: Dictionary of metrics available for the model
        :type kwargs: `dict`
        """
        if type(test_dataset) is tuple:
            x_test = test_dataset[0]
            y_test = test_dataset[1]
            return self.evaluate_model(x_test, y_test)
        else:
            return self.evaluate_generator_model(test_dataset)

    def evaluate_model(self, x, y, batch_size=128, **kwargs):
        """
        Evaluates the model given x and y.

        :param x: Samples with shape as expected by the model.
        :type x: `np.ndarray`
        :param y: Corresponding labels to x
        :type y: `np.ndarray`
        :param batch_size: Size of batches.
        :type batch_size: `int`
        :param kwargs: Dictionary of metrics available for the model
        :type kwargs: `dict`
        :return: metric name/value pairs
        :rtype: `dict`
        """
        with self.graph.as_default():
            set_session(self.sess)
            # Bug fix: pass the batch_size argument through (it was
            # previously hard-coded to 128 regardless of the parameter).
            metrics = self.model.evaluate(x, y, batch_size=batch_size, **kwargs)
        names = self.model.metrics_names
        dict_metrics = {}
        # model.evaluate returns a scalar for a single metric and a list
        # otherwise.
        if type(metrics) == list:
            for metric, name in zip(metrics, names):
                dict_metrics[name] = metric
        else:
            dict_metrics[names[0]] = metrics
        # Persist the metrics next to the model artifacts.
        filename = f"metrics_{self.model_type}"
        full_path = super().get_model_absolute_path(filename)
        with open(full_path, "w") as f:
            for metric in dict_metrics:
                f.write(f"{str(metric)}:{dict_metrics[metric]}\n")
        return dict_metrics

    def evaluate_generator_model(self, test_generator, batch_size=128, **kwargs):
        """
        Evaluates the model based on the provided data generator.

        :param test_generator: Testing datagenerator of type \
        `keras.utils.Sequence` or `keras.preprocessing.image.ImageDataGenerator`
        :type test_generator: `ImageDataGenerator` or `keras.utils.Sequence`
        :param batch_size: Unused; the generator controls its own batch \
        size. Kept for interface compatibility.
        :type batch_size: Integer
        :return: metrics
        :rtype: `dict`
        """
        steps = self.steps_per_epoch
        if 'steps_per_epoch' in kwargs:
            steps = kwargs['steps_per_epoch']
        if not type(test_generator) is NumpyArrayIterator and not steps:
            raise LocalTrainingException(
                "Variable steps_per_epoch cannot be None for generator not of type keras.utils.Sequence")
        with self.graph.as_default():
            metrics = self.model.evaluate_generator(
                test_generator, steps=steps)
        names = self.model.metrics_names
        dict_metrics = {}
        if type(metrics) == list:
            for metric, name in zip(metrics, names):
                dict_metrics[name] = metric
        else:
            dict_metrics[names[0]] = metrics
        return dict_metrics

    def save_model(self, filename=None):
        """
        Save a model to file in the format specific to the backend framework.
        The model is stored under the library's default data location.

        :param filename: Name of the file where to store the model. A \
        timestamped name is generated when omitted.
        :type filename: `str`
        :return: filename
        """
        if filename is None:
            filename = f"model_{time.time()}.h5"
        full_path = Path(super().get_model_absolute_path(""))
        full_path.joinpath(f"{self.model_name}").mkdir(parents=True, exist_ok=True)
        self.model.save(str(full_path.joinpath(filename)))  # $MODEL_DIR/filename
        logger.info('Model saved in path: %s.', full_path)
        return filename

    @staticmethod
    def load_model(file_name, custom_objects={}):
        """
        Loads a model from disk given the specified file_name.
        Tries standalone keras first, then falls back to tf.keras.

        :param file_name: Name of the file that contains the model to be loaded.
        :type file_name: `str`
        :return: Keras model loaded to memory
        :rtype: `keras.models.Model`
        """
        # NOTE: the mutable default ({}) is safe here only because
        # custom_objects is never mutated.
        model = KerasFLModel.load_model_via_keras(file_name,
                                                  custom_objects)
        if not model:
            model = KerasFLModel.load_model_via_tf_keras(file_name,
                                                         custom_objects)
            if model is None:
                logger.error('Loading model failed! '
                             'An acceptable compiled model should be of type '
                             '(keras.models/tensorflow.keras.models)!')
                raise FLException(
                    'Unable to load the provided compiled model!')
        return model

    @staticmethod
    def load_model_via_keras(file_name, custom_objects={}):
        """
        Loads a model from disk given the specified file_name via keras.

        :param file_name: Name of the file that contains the model to be loaded.
        :type file_name: `str`
        :return: Keras model loaded to memory, or None on failure
        :rtype: `keras.models.Model`
        """
        model = None
        try:
            model = keras.models.load_model(
                file_name, custom_objects=custom_objects)
            # Pre-build the predict function so later predict() calls are
            # safe across threads/graphs.
            model._make_predict_function()
        except Exception:
            logger.error(
                'Loading model via keras.models.load_model failed!')
        return model

    @staticmethod
    def load_model_via_tf_keras(file_name, custom_objects={}):
        """
        Loads a model from disk given the specified file_name via tf.keras.

        :param file_name: Name of the file that contains the model to be loaded.
        :type file_name: `str`
        :return: tf.keras model loaded to memory, or None on failure
        :rtype: `tf.keras.models.Model`
        """
        model = None
        try:
            model = tf.keras.models.load_model(
                file_name, custom_objects=custom_objects)
            model._make_predict_function()
        except Exception:
            logger.error('Loading model via tf.keras.models.load_model '
                         'failed!')
        return model

    @staticmethod
    def model_from_json_via_keras(json_file_name):
        """
        Loads a model architecture from disk via keras
        given the specified json file name.

        :param json_file_name: Name of the file that contains \
        the model architecture to be loaded.
        :type json_file_name: `str`
        :return: Keras model with only model architecture loaded to memory, \
        or None on failure
        :rtype: `keras.models.Model`
        """
        model = None
        # Context manager guarantees the file handle is closed even if
        # reading raises (the original leaked the handle on error).
        with open(json_file_name, 'r') as json_file:
            architecture = json_file.read()
        try:
            model = keras.models.model_from_json(architecture)
        except Exception:
            logger.error('Loading model via '
                         'keras.models.model_from_json failed!')
        return model

    @staticmethod
    def model_from_json_via_tf_keras(json_file_name):
        """
        Loads a model architecture from disk via tf.keras
        given the specified json file name.

        :param json_file_name: Name of the file that contains \
        the model architecture to be loaded.
        :type json_file_name: `str`
        :return: tf.keras model with only model architecture loaded to \
        memory, or None on failure
        :rtype: `tf.keras.models.Model`
        """
        model = None
        with open(json_file_name, 'r') as json_file:
            architecture = json_file.read()
        try:
            model = tf.keras.models.model_from_json(architecture)
        except Exception:
            logger.error(
                'Loading model via tf.keras.models.model_from_json failed! ')
        return model

    @staticmethod
    def load_model_from_spec(model_spec):
        """
        Loads model from provided model_spec, where model_spec is a `dict`
        that contains two items: model_spec['model_architecture'] has a
        pointer to the file where the keras model architecture in stored
        in json format, and model_spec['model_weights'] contains
        the path where the associated weights are stored as h5.

        :return: model
        :rtype: `keras.models.Model`
        """
        if 'model_definition' in model_spec:
            # Full model file (architecture + weights + compile state).
            model_file = model_spec['model_definition']
            model_absolute_path = config.get_absolute_path(model_file)

            # Resolve any user-supplied custom layers/objects by import path.
            custom_objects = {}
            if 'custom_objects' in model_spec:
                custom_objects_config = model_spec['custom_objects']
                for custom_object in custom_objects_config:
                    key = custom_object['key']
                    value = custom_object['value']
                    path = custom_object['path']
                    custom_objects[key] = config.get_attr_from_path(
                        path, value)

            model = KerasFLModel.load_model(model_absolute_path,
                                            custom_objects=custom_objects)
        else:
            # Load architecture from json file.
            try:
                model = KerasFLModel.model_from_json_via_keras(
                    model_spec['model_architecture'])
                if not model:
                    model = KerasFLModel.model_from_json_via_tf_keras(
                        model_spec['model_architecture'])
                    if model is None:
                        logger.error(
                            'An acceptable compiled model should be of type '
                            '(keras.models/tensorflow.keras.models)!')
                        raise FLException(
                            'Unable to load the provided compiled model!')
            except Exception as ex:
                logger.error(str(ex))
                raise FLException(
                    'Unable to load the provided compiled model!')

            # Load weights from h5 file, if supplied.
            if 'model_weights' in model_spec:
                model.load_weights(model_spec['model_weights'])

            # Compile model with provided parameters.
            compiled_option = model_spec['compile_model_options']
            try:
                if 'optimizer' in compiled_option:
                    optimizer = compiled_option['optimizer']
                else:
                    logger.warning('No optimizer information was provided '
                                   'in the compile_model_options, '
                                   'set keras optimizer to default: SGD')
                    optimizer = 'sgd'
                if 'loss' in compiled_option:
                    loss = compiled_option['loss']
                else:
                    logger.warning('No loss function was provided '
                                   'in the compile_model_options.'
                                   'set keras loss function to default: None')
                    loss = None
                if 'metrics' in compiled_option:
                    metrics = compiled_option['metrics']
                    # Accept either a single metric name or a list of names.
                    metrics = [metrics] if isinstance(
                        metrics, str) else metrics
                else:
                    logger.warning('No metrics information was provided '
                                   'in the compile_model_options,'
                                   'set keras metrics to default: None')
                    metrics = None
                model.compile(optimizer=optimizer,
                              loss=loss,
                              metrics=metrics)
            except Exception as ex:
                logger.exception(str(ex))
                logger.exception('Failed to compiled keras model.')
        return model

    def expand_model_by_layer_name(self, new_dimension, layer_name="dense"):
        """
        Expand the current Keras model with provided dimension of
        the hidden layers or model weights.
        This method by default expands the dense layer of
        the current neural network.
        It can be extended to expand other layers specified by `layer_name`,
        for example, it can be used to increase the number of CNN filters or
        increase the hidden layer size inside LSTM.

        :param new_dimension: New number of dimensions for \
        the fully connected layers
        :type new_dimension: `list`
        :param layer_name: layer's name to be expanded
        :type layer_name: `str`
        :return: None
        """
        if new_dimension is None:
            raise FLException('No information is provided for '
                              'the new expanded model. '
                              'Please provide the new dimension of '
                              'the resulting expanded model.')

        # Rewrite the matching layers' sizes in the serialized config,
        # then rebuild a fresh model from that config.
        model_config = json.loads(self.model.to_json())
        i = 0
        for layer in model_config['config']['layers']:
            if 'class_name' in layer and \
                    layer['class_name'].strip().lower() == layer_name:
                layer['config']['units'] = new_dimension[i]
                i += 1

        if self.is_keras:
            new_model = keras.models.model_from_json(json.dumps(model_config))
        else:
            new_model = tf.keras.models.model_from_json(
                json.dumps(model_config))

        # Re-compile with the original optimizer/loss; 'loss' itself is not
        # a metric and must be removed from the metric list.
        metrics = self.model.metrics_names
        if 'loss' in metrics:
            metrics.remove('loss')
        new_model.compile(optimizer=self.model.optimizer,
                          loss=self.model.loss,
                          metrics=metrics)
        self.model = new_model

    def get_gradient(self, train_data):
        """
        Compute the gradient with the provided dataset at the current local
        model's weights.

        :param train_data: Training data, a tuple \
        given in the form (x_train, y_train).
        :type train_data: `np.ndarray`
        :return: gradients
        :rtype: `list` of `np.ndarray`
        """
        with self.graph.as_default():
            set_session(self.sess)
            # Build the symbolic gradient of the total loss with respect to
            # the trainable weights.
            try:
                grads = self.model.optimizer.get_gradients(
                    self.model.total_loss,
                    self.model.trainable_weights)
            except Exception as ex:
                logger.exception(str(ex))
                raise FLException('Error occurred when defining '
                                  'gradient expression. ')
            symb_inputs = (self.model._feed_inputs +
                           self.model._feed_targets +
                           self.model._feed_sample_weights)
            # Use the backend matching the model's origin
            # (standalone keras vs tf.keras).
            if self.is_keras:
                from keras import backend as k
            else:
                from tensorflow.python.keras import backend as k
            f = k.function(symb_inputs, grads)
            try:
                x, y, sample_weight = self.model._standardize_user_data(
                    train_data[0],
                    train_data[1])
            except Exception as ex:
                logger.exception(str(ex))
                raise FLException('Error occurred when feeding data samples '
                                  'to compute current gradient.')
            return f(x + y + sample_weight)

    def is_fitted(self):
        """
        Return a boolean value indicating if the model is fitted or not.
        In particular, check if the keras model has weights.
        If it has, return True; otherwise return False.

        :return: res
        :rtype: `bool`
        """
        try:
            self.model.get_weights()
        except Exception:
            return False
        return True
| 2.328125 | 2 |
coroutine/async_await.py | sherlockliu/pythonic | 0 | 12766317 | # !/usr/bin/python
from tornado import ioloop
async def cal(num):
    """Run the demo calculator coroutine for *num* and print its result."""
    print('cal called.')
    outcome = await calculator(num)
    print(outcome)
async def calculator(num):
    """Sum 0..num-1, but deliberately raise before returning.

    NOTE(review): the unconditional ``raise`` makes ``return result``
    unreachable, and the bare ``except Exception: pass`` swallows the
    exception, so this coroutine always returns None. Presumably this is
    a demo of exception handling inside coroutines — confirm intent.
    """
    try:
        result = 0
        for i in range(0, num):
            result += i
        # print(f'result is {result}')
        raise Exception()
        return result
    except Exception:
        pass
async def main():
    """Entry coroutine: run the demo calculation, then print a marker."""
    await cal(100)
    print('hh')
if __name__ == '__main__':
    # Earlier experiments, kept for reference:
    # ioloop.IOLoop.current().start()
    # main()
    # run_sync drives the coroutine to completion on Tornado's IO loop.
    ioloop.IOLoop.current().run_sync(main)
| 3.09375 | 3 |
theme/management/commands/reset_quota.py | hydroshare/hydroshare | 178 | 12766318 | <reponame>hydroshare/hydroshare
from django.core.management.base import BaseCommand
from django_irods.storage import IrodsStorage
from django.conf import settings
class Command(BaseCommand):
    help = "Reset quota by forcing quota iRODS microservices to recalculate quota for all users."

    def handle(self, *args, **options):
        """Flag the data-zone and user-zone root collections so the iRODS
        quota microservices recompute usage for every user."""
        storage = IrodsStorage()

        # Data zone root collection.
        data_zone_root = f'/{settings.IRODS_ZONE}/home/{settings.IRODS_USERNAME}'
        storage.setAVU(data_zone_root, 'resetQuotaDir', 1)

        # User zone root collection.
        user_zone_root = (
            f'/{settings.HS_USER_IRODS_ZONE}/home/'
            f'{settings.HS_IRODS_PROXY_USER_IN_USER_ZONE}'
        )
        storage.setAVU(user_zone_root, 'resetQuotaDir', 1)
| 2.015625 | 2 |
src/html_http_dao.py | Alan-Greene/wcag | 0 | 12766319 | <reponame>Alan-Greene/wcag<gh_stars>0
import requests
class HtmlHttpDao():
    """Fetches raw HTML over HTTP; returns None on any request failure."""

    def get_html(self, url):
        """Return the HTML body at *url*, or None if the request fails.

        Errors are reported to stdout rather than raised, so callers can
        treat a failed fetch as "no document".
        """
        try:
            html = requests.get(url, timeout=3)
            html.raise_for_status()  # turn 4xx/5xx responses into HTTPError
            return html.text
        # Bug fix: RequestException is the BASE class of HTTPError,
        # ConnectionError and Timeout, so catching it first made the
        # specific handlers below unreachable. Specific handlers now come
        # first; the base class acts as the catch-all.
        except requests.exceptions.HTTPError as errh:
            print ("Html Http Dao Http Error:", errh)
        except requests.exceptions.ConnectionError as errc:
            print ("Html Http Dao Error Connecting:", errc)
        except requests.exceptions.Timeout as errt:
            print ("Html Http Dao Timeout Error:", errt)
        except requests.exceptions.RequestException as err:
            print ("Html Http Dao Fatal Error:", err)
| 3.03125 | 3 |
tests/integration/synthetic_data_test.py | gglin001/popart | 61 | 12766320 | <reponame>gglin001/popart
# Copyright (c) 2019 Graphcore Ltd. All rights reserved.
from collections import namedtuple
import numpy as np
import popart
import pytest
# `import test_util` requires adding to sys.path
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent))
import test_util as tu
# Seed numpy so any randomly derived test data is reproducible.
np.random.seed(0)

# Pairs a popart builder dtype string with the matching numpy dtype.
_DataType = namedtuple('_DataType', ['builder_type', 'np_type'])
_INT8 = _DataType('INT8', np.int8)
_UINT8 = _DataType('UINT8', np.uint8)
def run_pt_session(syntheticDataMode, inputType=None, d_shape=[100]):
    """Build and run a one-op popart inference session whose only op prints
    the input tensor, so the synthetic input data appears on stderr.

    :param syntheticDataMode: a popart.SyntheticDataMode value
    :param inputType: optional _DataType; when given, the input is declared
        with that builder type and cast to FLOAT before printing
    :param d_shape: input tensor shape
        (NOTE(review): mutable default list — safe here since it is never
        mutated, only read)
    """
    builder = popart.Builder()

    if inputType is not None:
        # Declare the typed input, then cast: printtensor sees FLOAT data
        # regardless of the declared input element type.
        d0_i8 = builder.addInputTensor(
            popart.TensorInfo(inputType.builder_type, d_shape))
        d0 = builder.aiOnnx.cast([d0_i8], "FLOAT")
        in_name = d0_i8
    else:
        d0 = builder.addInputTensor(popart.TensorInfo("FLOAT", d_shape))
        in_name = d0
    p = builder.aiGraphcore.printtensor([d0])

    opts = popart.SessionOptions()
    opts.syntheticDataMode = syntheticDataMode

    session = popart.InferenceSession(fnModel=builder.getModelProto(),
                                      dataFlow=popart.DataFlow(1, [p]),
                                      userOptions=opts,
                                      deviceInfo=tu.create_test_device())
    session.prepareDevice()
    anchors = session.initAnchorArrays()
    # The host-side ones() buffer is ignored in synthetic-data modes; the
    # device generates its own input data.
    stepio = popart.PyStepIO({in_name: np.ones(d_shape)}, anchors)
    session.run(stepio)
def numpy_array_from_printtensor_string(string):
    """Parse the tensor values from popart's printtensor output.

    The op prints something like ``name: {1.0,2.0,...}``; everything
    between the first '{' and the following '}' is taken as a
    comma-separated list of floats. Returns an empty float array when no
    braced data is present.
    """
    stringData = string.partition('{')[2].partition('}')[0]
    # np.fromstring is deprecated; split and convert explicitly instead.
    data = np.array([float(tok) for tok in stringData.split(',') if tok.strip()],
                    dtype=float)
    print(data)
    return data
@tu.requires_ipu
@pytest.mark.parametrize("inputType", [_INT8, _UINT8, None])
def test_verify_synthetic_inputs(capfd, inputType):
    """
    For each synthetic data mode:
    1. Get a session that prints the input tensor value to stderr
    2. Capture the tensor data from stderr
    3. Verify that the data is as expected for that synthetic data mode
    """
    # Hopefully this is large enough to achieve desired tolerance for mean/std,
    # even for ints.
    d_shape = [4000]

    # Test depends on logging output. Silence the logging from PopART
    popart.getLogger().setLevel("OFF")

    ## A) Expect input is all zeros
    run_pt_session(popart.SyntheticDataMode.Zeros,
                   inputType=inputType,
                   d_shape=d_shape)
    # printtensor writes to stderr; capfd captures it for parsing.
    _, err0 = capfd.readouterr()
    zeroData = numpy_array_from_printtensor_string(err0)
    assert np.all(zeroData == 0)

    ## B) Expect input is random normal, T~N(0,1)
    if inputType == _UINT8:
        # Casting normal data to unsigned results in non-normal data.
        return
    run_pt_session(popart.SyntheticDataMode.RandomNormal,
                   inputType=inputType,
                   d_shape=d_shape)
    _, err1 = capfd.readouterr()
    rnData = numpy_array_from_printtensor_string(err1)
    # The data should be non-constant with mean ~0 and std ~1.
    # (Tolerances are loose because INT8 quantizes the samples.)
    assert np.all(rnData == 0) == False
    assert np.isclose(np.mean(rnData), 0, atol=0.02)
    assert np.isclose(np.std(rnData), 1, atol=0.1)
def test_supported_input_type_float16():
    """Session construction in synthetic-data mode must accept inputs of
    every supported element type; building the session is itself the
    assertion (an unsupported type would raise)."""
    def run_with_input_of_type(dtype):
        # Minimal single-op graph with one input of the given dtype.
        builder = popart.Builder()
        in0 = builder.addInputTensor(popart.TensorInfo(dtype, [2]))
        out = builder.aiOnnx.sqrt([in0])
        opts = popart.SessionOptions()
        opts.syntheticDataMode = popart.SyntheticDataMode.RandomNormal
        # The session object is intentionally discarded.
        session = popart.InferenceSession(
            fnModel=builder.getModelProto(),
            userOptions=opts,
            deviceInfo=popart.DeviceManager().createCpuDevice(),
            dataFlow=popart.DataFlow(1, [out]))

    run_with_input_of_type("FLOAT16")
    run_with_input_of_type("FLOAT")
    run_with_input_of_type("INT32")
    run_with_input_of_type("UINT32")
| 1.804688 | 2 |
examples/simple/services.py | barberj/bridge-python | 0 | 12766321 | from BridgePython.bridge import Bridge
# A Bridge client; the private API key also permits publishing services.
bridge = Bridge(api_key='myapikey')
#
# Publishing a Bridge service
#
# Any Python object can be published. A published service
# can be retrieved by any Bridge client with the same API key pair.
#
# Only Bridge clients using the prviate API key may publish services.
#
class TestService(object):
    """Example service: replies 'Pong' through the supplied callback."""
    def ping(self, cb):
        # Bridge supplies a callback for the (asynchronous) reply.
        print 'Received ping request!'
        cb('Pong')
# Register the service under the name clients will use to look it up.
bridge.publish_service('testService', TestService())
#
# Retrieving a Bridge service
#
# This can be done from any Bridge client connected to the same
# Bridge server, regardless of language.
# If multiple clients publish a Bridge service, getService will
# retrieve from the publisher with the least load.
#
def message_cb(msg):
    # Prints whatever the remote service sends back (here: 'Pong').
    print msg
# Retrieve the service (possibly published by another client) and call it;
# connect() then starts the Bridge event loop.
testService = bridge.get_service('testService')
print 'Sending ping request'
testService.ping(message_cb)
bridge.connect()
| 3.078125 | 3 |
dirfile/pathlib_operations.py | zizonk/python-library | 0 | 12766322 | from pathlib import Path
import os
# Demo: build paths with pathlib and enumerate the .zip files in the
# current working directory.
path = Path.cwd()  # idiomatic replacement for Path(os.getcwd())
npath = path.joinpath('zizi')  # example of deriving a sub-path

for idx, file in enumerate(path.glob('*.zip')):
    print(idx, file)
# print(line) | 2.90625 | 3 |
# CSW/csw_post_request.py
# coding: utf-8

# # Try some "RAW" requests to pycsw

# In[1]:

import requests, json

# In[2]:

# Every request below POSTs an XML GetRecords body.
headers = {'Content-Type': 'application/xml'}
# ### Try apiso:serviceType query

# In[3]:

# BUGFIX: the original had escapeChar="\" -- inside a non-raw string `\"` is
# an escaped quote, which corrupted the XML; `\\` emits a literal backslash.
# Also converted Python 2 `print x` statements to print() calls.
input = '''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:ogc="http://www.opengis.net/ogc" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
outputSchema="http://www.opengis.net/cat/csw/2.0.2" outputFormat="application/xml"
version="2.0.2" service="CSW" resultType="results" maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>summary</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:ServiceType</ogc:PropertyName>
<ogc:Literal>*WMS*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
'''

# Geoport pycsw instance

# In[4]:

endpoint = 'http://geoport.whoi.edu/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# Geonode pycsw instance

# In[ ]:

endpoint = 'http://geonode.wfp.org/catalogue/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# geodata.gov.gr pycsw instance

# In[ ]:

endpoint = 'http://geodata.gov.gr/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# Data.Gov pycsw instance

# In[5]:

endpoint = 'http://catalog.data.gov/csw-all'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# PACIOOS pycsw instance

# In[6]:

endpoint = 'http://oos.soest.hawaii.edu/pacioos/ogc/csw.py'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# Data.ioos.us endpoint

# In[7]:

endpoint = 'http://data.ioos.us/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])
# ### Try using both apiso:AnyText and apiso:ServiceType queries

# In[ ]:

# BUGFIX: escapeChar="\" collapsed to a stray quote in the generated XML
# (`\"` is an escaped quote in a non-raw string); use `\\` instead.
# Python 2 print statements converted to print() calls.
input = '''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:ogc="http://www.opengis.net/ogc" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
outputSchema="http://www.opengis.net/cat/csw/2.0.2" outputFormat="application/xml"
version="2.0.2" service="CSW" resultType="results" maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>summary</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*coawst*</ogc:Literal>
</ogc:PropertyIsLike>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:ServiceType</ogc:PropertyName>
<ogc:Literal>*OPeNDAP*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
'''

# In[ ]:

endpoint = 'http://geoport.whoi.edu/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# In[ ]:

endpoint = 'http://catalog.data.gov/csw-all'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# In[ ]:

endpoint = 'http://data.ioos.us/csw'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])

# In[ ]:

# NOTE(review): this repeats the catalog.data.gov request above verbatim.
endpoint = 'http://catalog.data.gov/csw-all'
xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:2000])
# ### BBOX query on NGDC Geoportal Server CSW

# In[ ]:

endpoint = 'http://www.ngdc.noaa.gov/geoportal/csw'

# In[ ]:

# Bounding box (CRS84: lon/lat order) + time window + AnyText filter.
# BUGFIX: Python 2 `print` statement converted to print().
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:ogc:def:crs:OGC:1.3:CRS84">
<gml:lowerCorner> -158.4 20.7</gml:lowerCorner>
<gml:upperCorner> -157.2 21.6</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*sea_water_salinity*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
'''

# In[ ]:

xml_string=requests.post(endpoint, data=input, headers=headers).text
print(xml_string[:650])
# ## BBOX query on PACIOOS pyCSW

# In[ ]:

# Same query as above, but (lat, lon) axis order with an EPSG:4326 srsName.
# BUGFIX: Python 2 `print` statement converted to print().
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:x-ogc:def:crs:EPSG:6.11:4326">
<gml:lowerCorner> 20.7 -158.4</gml:lowerCorner>
<gml:upperCorner> 21.6 -157.2</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2014-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*sea_water_salinity*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
'''

# In[ ]:

endpoint='http://oos.soest.hawaii.edu/pacioos/ogc/csw.py'

# In[ ]:

xml_string=requests.post(endpoint, data=input, headers=headers).text

# In[ ]:

print(xml_string[:2000])
# ## Query COMT pycsw

# ### Try (lat,lon) order of bounding box with `srsName=EPSG:4326`

# In[ ]:

# NOTE(review): this variant uses (lat, lon) axis order with an EPSG:4326
# srsName; the next cell repeats it with (lon, lat) order under CRS84.
input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:x-ogc:def:crs:EPSG:6.11:4326">
<gml:lowerCorner> 27 -100</gml:lowerCorner>
<gml:upperCorner> 30 -97</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2008-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2008-06-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*FVCOM*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
''';

# In[ ]:

endpoint='http://comt.sura.org:8000/pycsw/csw.py'

# In[ ]:

xml_string=requests.post(endpoint, data=input, headers=headers).text
# NOTE(review): a bare expression only displays output in a notebook; as a
# plain script the line below is a no-op.
xml_string[:2000]

# ### Try (lon,lat) order of bounding box with `srsName=CRS84`

# In[ ]:

input='''
<csw:GetRecords xmlns:csw="http://www.opengis.net/cat/csw/2.0.2"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:gml="http://www.opengis.net/gml" outputSchema="http://www.opengis.net/cat/csw/2.0.2"
outputFormat="application/xml" version="2.0.2" service="CSW" resultType="results"
maxRecords="1000"
xsi:schemaLocation="http://www.opengis.net/cat/csw/2.0.2 http://schemas.opengis.net/csw/2.0.2/CSW-discovery.xsd">
<csw:Query typeNames="csw:Record">
<csw:ElementSetName>full</csw:ElementSetName>
<csw:Constraint version="1.1.0">
<ogc:Filter>
<ogc:And>
<ogc:BBOX>
<ogc:PropertyName>ows:BoundingBox</ogc:PropertyName>
<gml:Envelope srsName="urn:ogc:def:crs:OGC:1.3:CRS84">
<gml:lowerCorner>-100 27</gml:lowerCorner>
<gml:upperCorner> -97 30</gml:upperCorner>
</gml:Envelope>
</ogc:BBOX>
<ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_begin</ogc:PropertyName>
<ogc:Literal>2008-12-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsLessThanOrEqualTo>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>apiso:TempExtent_end</ogc:PropertyName>
<ogc:Literal>2008-06-01T16:43:00Z</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyIsLike wildCard="*" singleChar="?" escapeChar="\\">
<ogc:PropertyName>apiso:AnyText</ogc:PropertyName>
<ogc:Literal>*FVCOM*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:And>
</ogc:Filter>
</csw:Constraint>
</csw:Query>
</csw:GetRecords>
''';

# In[ ]:

xml_string=requests.post(endpoint, data=input, headers=headers).text
xml_string[:2000]

# ### Woo hoo! We get 4 records returned with both (lat,lon) EPSG:4326 and (lon,lat) CRS84 queries! Success!!

# In[ ]:

endpoint='http://geoport.whoi.edu/pycsw'

# In[ ]:

xml_string=requests.post(endpoint, data=input, headers=headers).text
xml_string[:2000]

# In[ ]:
# | 2.296875 | 2 |
# tikreg/models.py
import time
import itertools
from collections import defaultdict as ddict
import numpy as np
from scipy import linalg as LA
from scipy.linalg import cho_factor, cho_solve
from tikreg.utils import SVD
from tikreg.kernels import lazy_kernel
import tikreg.utils as tikutils
# Default linear-algebra strategy used by the solvers: 'SVD' or 'Chol'.
METHOD = 'SVD'
# Default goodness-of-fit metric: 'correlation' or 'rsquared'.
METRIC = 'correlation'
def nan_to_num(*args, **kwargs):
    """Replace NaNs/infs with finite numbers (delegates to numpy.nan_to_num)."""
    cleaned = np.nan_to_num(*args, **kwargs)
    return cleaned
def zscore(*args, **kwargs):
    """Z-score data along columns (delegates to scipy.stats.zscore)."""
    from scipy.stats import zscore as _scipy_zscore
    return _scipy_zscore(*args, **kwargs)
def atleast_2d(arr):
    """Return *arr* as a 2D column array when it is 1D; otherwise unchanged."""
    if arr.ndim >= 2:
        return arr
    return np.atleast_2d(arr).T
def _ols(X, Y, rcond=1e-08):
'''Perform OLS fit, return weight estimates
'''
return np.dot(LA.pinv(X, rcond=rcond), Y)
def ols(X, Y, rcond=1e-08):
    '''OLS weight estimates computed from the truncated SVD of the design.

    Singular values at or below *rcond* are discarded before inverting.
    '''
    _, sing, Vt = LA.svd(X, full_matrices=False)
    keep = sing > rcond
    sing = sing[keep]
    V = Vt.T[:, keep]
    XTY = np.dot(X.T, Y)
    # (X'X)^-1 = V diag(1/s^2) V'  on the retained spectrum.
    XTXinv = np.dot(V / sing**2, V.T)
    return np.dot(XTXinv, XTY)
def loo_ols(xtrain_samples, ytrain_samples, rcond=1e-08):
    '''Leave-one-out OLS.

    Fit OLS on every leave-one-fold-out subset and return the weights
    averaged across the held-out folds.

    Parameters
    ----------
    xtrain_samples : np.ndarray (nfolds, time, features)
    ytrain_samples : np.ndarray (nfolds, time, voxels)
    rcond : float
        Small-singular-value cutoff forwarded to :func:`ols`.

    Returns
    -------
    B : np.ndarray (features, voxels)
        Mean OLS weight estimates across folds.
    '''
    B = 0
    nreps = len(ytrain_samples)
    assert nreps == len(xtrain_samples)
    samples = np.arange(nreps)
    # BUGFIX: `xrange` is Python 2 only; `range` is the Python 3 equivalent.
    for left_out in range(nreps):
        train = samples != left_out
        X = np.vstack(xtrain_samples[train])
        Y = np.vstack(ytrain_samples[train])
        # BUGFIX: `rcond` was accepted but silently ignored; forward it.
        B += ols(X, Y, rcond=rcond)/nreps
    return B
def olspred(X, Y, Xtest=False):
    '''Fit OLS and return predictions ``Yhat``.

    With ``Xtest`` False/None, returns in-set predictions (the projection of
    ``Y`` onto the column space of ``X``); otherwise returns predictions for
    the held-out design ``Xtest``.
    '''
    U, S, Vt = SVD(X)
    V = Vt.T
    del Vt
    UTY = np.dot(U.T, Y)
    if (Xtest is False) or (Xtest is None):
        # In-set prediction: Yhat = U U' Y.
        LH = U
    else:
        # Out-of-set: map Xtest through V scaled by 1/S, then apply U'Y.
        LH = np.dot(Xtest, tikutils.mult_diag(1.0/S, V, left=False))
    return np.dot(LH, UTY)
def check_response_dimensionality(train, test, allow_test_none=True):
    '''Ensure response matrices are at least 2D before running models.

    1D arrays become column vectors. When *test* is None and
    *allow_test_none* is False, the (reshaped) training data is reused.
    '''
    if train.ndim == 1:
        train = train[..., None]
    if test is None:
        if not allow_test_none:
            test = train
    elif test.ndim == 1:
        test = test[..., None]
    return train, test
def should_solve_dual(X, kernel):
    '''Decide whether to solve the regression problem in the dual (kernel) space.

    The dual is preferred when features outnumber 75% of the samples, or when
    a non-linear kernel is requested.

    Parameters
    ----------
    X : np.ndarray (n, p)
        Design matrix.
    kernel : str or None
        Kernel name; None is treated as linear.

    Returns
    -------
    bool
    '''
    n, p = X.shape
    if p > n*0.75:
        return True
    # BUGFIX: the original compared `kernel is not 'linear'` -- identity
    # comparison against a string literal is implementation-dependent;
    # use equality instead.
    return (kernel is not None) and (kernel != 'linear')
def clean_results_dict(results):
    '''Cast result lists to arrays and guarantee at least two dimensions.

    Every entry except ``'performance'`` is squeezed before the dimensionality
    check, matching the shapes produced by the L2 solvers.
    '''
    for key in list(results.keys()):
        value = np.asarray(results[key])
        if key != 'performance':
            value = value.squeeze()
        if value.ndim <= 1:
            value = value[..., None]
        results[key] = value
    return results
def solve_l2_primal(Xtrain, Ytrain,
                    Xtest=None, Ytest=None,
                    ridges=[0], method=METHOD,
                    zscore_ytrain=False, zscore_ytest=False,
                    EPS=1e-10, verbose=False,
                    performance=False, predictions=False, weights=False,
                    metric=METRIC):
    '''Solve the (primal) L2 regression problem for each L2 parameter.

    Parameters
    ----------
    Xtrain (n, p), Ytrain (n, v): training design and responses.
    Xtest, Ytest: held-out set (needed for `predictions`/`performance`).
    ridges: list of ridge penalties to evaluate.
    method: 'SVD' (singular spectrum) or 'Chol' (Cholesky of X'X).
    EPS: singular values at or below this threshold are discarded (SVD only).
    metric: 'correlation' or 'rsquared'.

    Returns
    -------
    dict with any of 'performance', 'predictions' and 'weights', stacked
    across the requested ridges.
    '''
    if metric == 'correlation':
        performance_metric = tikutils.columnwise_correlation
    elif metric == 'rsquared':
        performance_metric = tikutils.columnwise_rsquared
    else:
        # BUGFIX: the exception was previously constructed but never raised,
        # leaving `performance_metric` undefined for unknown metrics.
        raise ValueError('Unknown metric: %s'%metric)

    results = ddict(list)
    Ytrain = atleast_2d(Ytrain)
    if predictions:
        assert Xtest is not None
    if performance:
        assert (Ytest is not None) and (Xtest is not None)
        Ytest = atleast_2d(Ytest)
    if zscore_ytrain:
        Ytrain = zscore(Ytrain)
    if zscore_ytest:
        Ytest = zscore(Ytest)

    if method == 'SVD':
        # Thin SVD of the design; drop near-zero singular values.
        U, S, Vt = SVD(Xtrain, full_matrices=False)
        V = Vt.T
        del Vt
        gidx = S > EPS
        S = S[gidx]
        U = U[:, gidx]
        V = V[:, gidx]
        UTY = np.dot(U.T, Ytrain)
        del(U)
        if predictions or performance:
            XtestV = np.dot(Xtest, V)
    elif method == 'Chol':
        XtY = np.dot(Xtrain.T, Ytrain)
        XtX = np.dot(Xtrain.T, Xtrain)

    for lidx, rlambda in enumerate(ridges):
        if method == 'SVD':
            # Ridge shrinkage of the spectrum: s / (s^2 + lambda^2).
            D = S / (S**2 + rlambda**2)
        elif method == 'Chol':
            XtXI = XtX + rlambda**2 * np.eye(Xtrain.shape[-1])
            L, lower = cho_factor(XtXI, lower=True, check_finite=False)
            del XtXI

        if performance:
            # Compute held-out predictions and their performance.
            if method == 'SVD':
                XVD = tikutils.mult_diag(D, XtestV, left=False)
                Ypred = np.dot(XVD, UTY)
            elif method == 'Chol':
                cho_weights = cho_solve((L, lower), XtY)
                Ypred = np.dot(Xtest, cho_weights)
            cc = performance_metric(Ypred, Ytest, axis=0)
            results['performance'].append(cc)

            if verbose:
                perf = nan_to_num(cc)
                contents = (lidx +1, rlambda, np.mean(perf),
                            np.percentile(perf, 25), np.median(perf), np.percentile(perf, 75),
                            np.sum(perf > 0.0), np.sum(perf > 0.5))
                txt = "lambda %02i: %8.03f, mean=%0.04f, (25,50,75)pctl=(%0.04f,%0.04f,%0.04f),"
                txt += "(0.0<r>0.5): (%03i,%03i)"
                print(txt % contents)

        if predictions and performance:
            results['predictions'].append(Ypred)
        elif predictions:
            # only predictions
            if method == 'SVD':
                XVD = tikutils.mult_diag(D, XtestV, left=False)
                Ypred = np.dot(XVD, UTY)
            elif method == 'Chol':
                cho_weights = cho_solve((L, lower), XtY)
                Ypred = np.dot(Xtest, cho_weights)
            results['predictions'].append(Ypred)
        if weights:
            # weights
            if method == 'SVD':
                betas = np.dot(tikutils.mult_diag(D, V, left=False), UTY)
            elif method == 'Chol':
                if performance or predictions:
                    betas = cho_weights
                else:
                    betas = cho_solve((L, lower), XtY)
            results['weights'].append(betas)

    return clean_results_dict(dict(results))
def solve_l2(Xtrain, Ytrain,
             Xtest=None, Ytest=None,
             ridge=0.0, verbose=False,
             kernel_name='linear', kernel_param=None,
             kernel_weights=False,
             **kwargs):
    '''Solve an L2-regularized regression problem for a single ridge value.

    Dispatches to the primal or the dual (kernel) solver depending on the
    problem shape and the requested kernel; extra keyword arguments are
    forwarded to the underlying solver.
    '''
    n, p = Xtrain.shape
    if kernel_name is None:
        kernel_name = 'linear'

    if not should_solve_dual(Xtrain, kernel_name):
        # Primal path; default to a within-set fit when no test data given.
        if (Xtest is None) and (Ytest is None):
            Xtest, Ytest = Xtrain, Ytrain
        fit = solve_l2_primal(Xtrain, Ytrain,
                              Xtest, Ytest,
                              ridges=[ridge],
                              **kwargs)
        return clean_results_dict(dict(fit))

    # Dual/kernel path: build (and cache) the training kernel.
    ktrain_object = lazy_kernel(Xtrain, kernel_type=kernel_name)
    ktrain_object.update(kernel_param, verbose=verbose)
    if (Xtest is None) and (Ytest is None):
        # Within-set fit: reuse the training kernel.
        ktest_object = ktrain_object
        Ytest = Ytrain
    else:
        # Project the test data onto the training kernel.
        ktest_object = lazy_kernel(Xtest, Xtrain, kernel_type=kernel_name)
        ktest_object.update(kernel_param, verbose=verbose)
    fit = solve_l2_dual(ktrain_object.kernel, Ytrain,
                        ktest_object.kernel, Ytest,
                        ridges=[ridge],
                        **kwargs)
    if (kernel_name == 'linear') and ('weights' in fit) and (kernel_weights is False):
        # Map kernel weights back into feature space for the linear kernel.
        fit['weights'] = np.dot(Xtrain.T, fit['weights'])
    return clean_results_dict(dict(fit))
def solve_l2_dual(Ktrain, Ytrain,
                  Ktest=None, Ytest=None,
                  ridges=[0.0], method=METHOD, EPS=1e-10, verbose=False,
                  performance=False, predictions=False, weights=False,
                  metric=METRIC,
                  ):
    '''Solve the dual (kernel) L2 regression problem for each L2 parameter.

    Parameters
    ----------
    Ktrain (n, n): training kernel; Ktest (m, n): test-by-train kernel.
    ridges: list of ridge penalties.
    method: 'SVD' (eigendecomposition of Ktrain) or 'Chol'.
    EPS: eigenvalue threshold; None keeps the full spectrum.
    metric: 'correlation' or 'rsquared'.

    Returns
    -------
    dict with any of 'performance', 'predictions' and kernel-space 'weights'.
    '''
    if metric == 'correlation':
        performance_metric = tikutils.columnwise_correlation
    elif metric == 'rsquared':
        performance_metric = tikutils.columnwise_rsquared
    else:
        # BUGFIX: the exception was previously constructed but never raised.
        raise ValueError('Unknown metric: %s'%metric)

    results = ddict(list)
    if predictions:
        assert Ktest is not None
    if performance:
        assert (Ytest is not None) and (Ktest is not None)
        Ytest = atleast_2d(Ytest)

    if method == 'SVD':
        L, Q = LA.eigh(Ktrain)
        if EPS is None:
            # BUGFIX: `np.bool` was removed from NumPy; the builtin `bool`
            # is the exact equivalent.
            gidx = np.ones(len(L), dtype=bool)
        else:
            gidx = L > EPS
        L = L[gidx]
        Q = Q[:, gidx]
        QTY = np.dot(Q.T, Ytrain)
        if predictions or performance:
            KtestQ = np.dot(Ktest, Q)

    for rdx, rlambda in enumerate(ridges):
        if method == 'SVD':
            # Ridge shrinkage of the eigenvalues: 1 / (l + lambda^2).
            D = 1.0 / (L + rlambda**2)
        elif method == 'Chol':
            KtKI = Ktrain + rlambda**2 * np.eye(Ktrain.shape[0])
            L, lower = cho_factor(KtKI, lower=True, check_finite=False)
            del KtKI

        if performance:
            if method == 'SVD':
                KtestQD = tikutils.mult_diag(D, KtestQ, left=False)
                Ypred = np.dot(KtestQD, QTY)
            elif method == 'Chol':
                cho_weights = cho_solve((L, lower), Ytrain)
                Ypred = np.dot(Ktest, cho_weights)
            cc = performance_metric(Ypred, Ytest)
            results['performance'].append(cc)

            if verbose:
                perf = nan_to_num(cc)
                contents = (rdx +1, rlambda, np.mean(perf),
                            np.percentile(perf, 25), np.median(perf), np.percentile(perf, 75),
                            np.sum(perf > 0.0), np.sum(perf > 0.5))
                txt = "lambda %02i: %8.03f, mean=%0.04f, (25,50,75)pctl=(%0.04f,%0.04f,%0.04f),"
                txt += "(0.0<r>0.5): (%03i,%03i)"
                print(txt % contents)

        if predictions and performance:
            results['predictions'].append(Ypred)
        elif predictions:
            if method == 'SVD':
                KtestQD = tikutils.mult_diag(D, KtestQ, left=False)
                Ypred = np.dot(KtestQD, QTY)
            elif method == 'Chol':
                cho_weights = cho_solve((L, lower), Ytrain)
                Ypred = np.dot(Ktest, cho_weights)
            results['predictions'].append(Ypred)
        if weights:
            if method == 'SVD':
                QD = tikutils.mult_diag(D, Q, left=False)
                kernel_weights = np.dot(QD, QTY)
            elif method == 'Chol':
                if performance or predictions:
                    kernel_weights = cho_weights
                else:
                    kernel_weights = cho_solve((L, lower), Ytrain)
            results['weights'].append(kernel_weights)

    return clean_results_dict(dict(results))
def kernel_banded_temporal_prior(kernel, temporal_prior, spatial_prior,
                                 delays):
    '''Build a delayed kernel when the spatial prior is (scaled) identity.

    Fast path of :func:`kernel_spatiotemporal_prior`: with a constant-diagonal
    spatial prior, the feature-space product reduces to rescaling shifted
    sub-blocks of the undelayed kernel.

    Parameters
    ----------
    kernel : np.ndarray (n, n)
        Undelayed kernel of the features.
    temporal_prior : np.ndarray (d, d)
        Prior across delays, d == len(delays).
    spatial_prior : scalar or constant-diagonal matrix
        Reduced to its (constant) diagonal value.
    delays : sequence of int
    '''
    if not np.isscalar(spatial_prior):
        # make sure the matrix is diagonal
        assert tikutils.isdiag(spatial_prior)
        assert np.allclose(np.diag(spatial_prior), spatial_prior[0,0])
        # Collapse the constant-diagonal matrix to its scalar scaling.
        spatial_prior = spatial_prior[0,0]
    # get the scaling
    assert np.isscalar(spatial_prior)

    delayed_kernel = np.zeros_like(kernel)
    for jdx, jdelay in enumerate(delays):
        for idx, idelay in enumerate(delays):
            if temporal_prior[idx,jdx] == 0:
                # Zero temporal-prior entries contribute nothing; skip.
                continue
            Ki = kernel[tikutils.delay2slice(idelay), tikutils.delay2slice(jdelay)]
            tmp = temporal_prior[idx,jdx]*Ki*spatial_prior
            # Accumulate the shifted, rescaled block into the delayed kernel.
            delayed_kernel[idelay:,jdelay:] += tmp
    return delayed_kernel
def kernel_spatiotemporal_prior(Xtrain, temporal_prior, spatial_prior,
                                Xtest=None, delays=None):
    '''Compute the kernel matrix of a model with a spatio-temporal prior.

    Parameters
    ----------
    Xtrain : np.ndarray (n, p)
    temporal_prior : np.ndarray (d, d), d = len(delays)
    spatial_prior : np.ndarray (p, p)
    Xtest : np.ndarray (m, p) or None
        When None, the within-set (train-by-train) kernel is computed.
    delays : sequence of int (required)

    Returns
    -------
    kernel : np.ndarray (m, n)
    '''
    assert delays is not None
    matrix_mult = np.dot
    if tikutils.isdiag(spatial_prior):
        # Diagonal spatial prior: replace the full matrix product with
        # column scaling (or a scalar multiply for a constant diagonal).
        def matrix_mult(xx,yy):
            di = np.diag(yy)
            if np.allclose(di, di[0]):
                # constant diagonal
                res = xx*di[0]
            else:
                res = tikutils.mult_diag(di, xx, left=False)
            return res

    if Xtest is None:
        Xtest = Xtrain
    kernel = np.zeros((Xtest.shape[0], Xtrain.shape[0]))
    for jdx, jdelay in enumerate(delays):
        Xj = Xtrain[tikutils.delay2slice(jdelay)]
        for idx, idelay in enumerate(delays):
            if temporal_prior[idx,jdx] == 0:
                # Zero temporal-prior entries contribute nothing; skip.
                continue
            Xi = Xtest[tikutils.delay2slice(idelay)]
            tmp = np.dot(temporal_prior[idx,jdx]*matrix_mult(Xi, spatial_prior), Xj.T)
            # Accumulate the delayed block into the output kernel.
            kernel[idelay:,jdelay:] += tmp
    return kernel
def kernel_cvridge(Ktrain, Ytrain,
                   Ktest=None, Ytest=None,
                   ridges=[0.0],
                   folds='cv', nfolds=5, blocklen=5, trainpct=0.8,
                   performance=False, predictions=False, weights=False,
                   metric=METRIC,
                   verbose=True, EPS=1e-10,
                   ):
    '''Cross-validate the ridge penalty of a kernel ridge regression.

    Runs cross-validation on (Ktrain, Ytrain) for every penalty in `ridges`,
    then (if any output is requested) refits with the best penalty and
    evaluates on (Ktest, Ytest).

    Returns
    -------
    dict with 'cvresults' (nfolds, nridges, nresponses) and, if requested,
    'performance', 'predictions' and kernel-space 'weights'.
    '''
    import time
    start_time = time.time()

    n = Ktrain.shape[0]
    if not isinstance(folds, list):
        folds = tikutils.generate_trnval_folds(n, sampler=folds,
                                               nfolds=nfolds,
                                               testpct=1-trainpct,
                                               nchunks=blocklen)
    else:
        nfolds = len(folds)

    nridges = len(ridges)
    if verbose:
        txt = (nridges, nfolds)
        intro = 'Fitting *%i* ridges, across *%i* folds'%txt
        print(intro)

    results = np.empty((nfolds, nridges, Ytrain.shape[-1]))
    for fdx, fold in enumerate(folds):
        trn, val = fold
        ntrn, nval = len(trn), len(val)
        if verbose:
            txt = (fdx+1,nfolds,ntrn,nval)
            print('train ridge fold  %i/%i: ntrain=%i, nval=%i'%txt)

        Ktrn = tikutils.fast_indexing(Ktrain, trn, trn)
        Kval = tikutils.fast_indexing(Ktrain, val, trn)

        res = solve_l2_dual(Ktrn, Ytrain[trn],
                            Kval, zscore(Ytrain[val]),
                            ridges, EPS=EPS,
                            weights=False,
                            predictions=False,
                            performance=True,
                            metric=metric,
                            verbose=verbose)
        results[fdx,:,:] = res['performance']

    # We done, otherwise fit and predict the held-out set
    if (predictions is False) and (performance is False) and (weights is False):
        if verbose: print('Duration %0.04f[mins]' % ((time.time()-start_time)/60.))
        return {'cvresults' : results}

    # Find best parameters across responses
    surface = np.nan_to_num(results.mean(0)).mean(-1).squeeze()

    # find the best point in the 2D space
    max_point = np.where(surface.max() == surface)
    # make sure it's unique (conservative-ish biggest ridge/parameter)
    # BUGFIX: `map(max, ...)` returns a lazy iterator on Python 3 which cannot
    # index a list; extract the (largest) argmax index explicitly.
    ridge_argmax = int(max(max_point[0]))
    ridgeopt = ridges[ridge_argmax]

    if verbose:
        desc = 'held-out' if (Ktest is not None) else 'within'
        outro = 'Predicting {d} set:\ncvperf={cc},ridge={alph}'
        outro = outro.format(d=desc,cc=surface.max(),alph=ridgeopt)
        print(outro)

    fit = solve_l2_dual(Ktrain, Ytrain,
                        Ktest, Ytest,
                        ridges=[ridgeopt],
                        performance=performance,
                        predictions=predictions,
                        weights=weights,
                        EPS=EPS,
                        metric=metric,
                        verbose=verbose,
                        )

    if verbose: print('Duration %0.04f[mins]' % ((time.time()-start_time)/60.))
    fit = clean_results_dict(dict(fit))
    fit['cvresults'] = results
    return fit
def cvridge(Xtrain, Ytrain,
            Xtest=None, Ytest=None,
            ridges=[0.0],
            Li=None,
            kernel_name='linear', kernel_params=None,
            folds='cv', nfolds=5, blocklen=5, trainpct=0.8,
            verbose=True, EPS=1e-10,
            withinset_test=False,
            performance=False, predictions=False, weights=False,
            kernel_weights=False,
            metric=METRIC):
    """Cross-validation procedure for tikhonov regularized regression.

    Parameters
    ----------
    Xtrain (n, p):
        Design matrix
    Ytrain (n, v):
        Training set responses
    Xtest (None, (m, p)):
        Design matrix for held-out set
    Ytest (None, (m, v)):
        Held-out set responses
    ridges (r,):
        Ridge parameters to evaluate
    Li (q,p):
        Generalized tikhonov regression. This solves the problem with a
        prior on $\beta$ determined by $L^\top L$. The problem is solved
        in the standard form and in kernel space if necessary.
    kernel_name (str):
        Kernel to use
    kernel_params (None, (k,)):
        Kernel parameters to cross-validate
    folds (str, list):
        * (str) Type of cross-validation
          - 'cv'  - cross-validation with chunks of size ``blocklen``
          - 'nbb' - block boostrap with chunks of size ``blocklen``
          - 'mbb' - moving/overlapping block bootstrap chunks of size ``blocklen``
        * (list) Can also be a list of (train, test) pairs: [(trn1, test1),...]
    nfolds (int):
        Number of learning folds
    blocklen (int):
        Chunk data into blocks of this size, and sample these blocks
    trainpct (float 0-1):
        Percentage of data to use in training if using a bootstrap sampler.
    withinset_test (bool):
        If no ``Xtest`` or ``Ytest`` is given and ``predictions`` and/or
        ``performance`` are requested, compute these values based on training set.
    performance (bool):
        Held-out prediction performance
    predictions (bool):
        Held-out timecourse predictions
    weights (bool):
        Weight estimates on training set (does not depend on (``Xtest``,``Ytest``)
    kernel_weights (bool):
        Whether to project kernel weights into feature weights.
        If True, the kernel weights are returned. This is useful when fitting
        large models and storing the feature weights is expensive.
    verbose (bool):
        Verbosity
    EPS (float):
        Value used to threshold small eigenvalues

    Returns
    -------
    fit (optional; dict):
        cross-validation results per response for each fold, kernel and L2 parameters
        * cvresults (``nfolds``, len(``kernel_params``), len(``ridges``), nresponses)
        If a held-out set (``Xtest`` and ``Ytest``) is specified, performs the
        fit on the full training set with the optimal L2 and kernel parameters.
        It returns, as requested, any of:
        * predictions (m, v) ``Ytest`` prediction for each voxel
        * performance (v,) correlation coefficient of predictions and ``Ytest``
        * weights: (p, v) for linear models, (n by v) for non-linear models
    """
    import time
    start_time = time.time()

    # BUGFIX: `TestueError` does not exist (corrupted `ValueError`).
    if kernel_name is None: raise ValueError('Say linear if linear')
    kernel_params = [None] if (kernel_name == 'linear') else kernel_params
    nkparams = len(kernel_params)

    Ytrain, Ytest = check_response_dimensionality(Ytrain, Ytest, allow_test_none=True)
    Xtrain, Xtest = check_response_dimensionality(Xtrain, Xtest, allow_test_none=True)

    # Check for generalized tikhonov
    if Li is not None:
        Xtrain = np.dot(Xtrain, Li)

    n, p = Xtrain.shape
    if not isinstance(folds, list):
        folds = tikutils.generate_trnval_folds(n, sampler=folds,
                                               nfolds=nfolds,
                                               testpct=1-trainpct,
                                               nchunks=blocklen)
        if kernel_name != 'linear':
            # if kernel is not linear we need to get a list to re-use folds
            folds = [(trn,test) for trn,test in folds]
    else:
        nfolds = len(folds)

    nridges = len(ridges)

    if verbose:
        txt = (nridges, nfolds, nkparams, kernel_name)
        intro = 'Fitting *%i* ridges, across *%i* folds, and *%i* "%s" kernel parameters'%txt
        print(intro)

    # figure out how to solve the problem
    solve_dual = should_solve_dual(Xtrain, kernel_name)
    if solve_dual:
        if verbose: print('Caching *%s* kernel'%kernel_name)
        ktrain_object = lazy_kernel(Xtrain, kernel_type=kernel_name)

    results = np.empty((nfolds, nkparams, nridges, Ytrain.shape[-1]))
    for kdx, kernel_param in enumerate(kernel_params):
        if solve_dual:
            ktrain_object.update(kernel_param)
            kernel = ktrain_object.kernel
            if verbose:
                txt = (kernel_name,kdx+1,nkparams,str(kernel_param))
                print('Updating *%s* kernel %i/%i:%s'%txt)

        for fdx, fold in enumerate(folds):
            trn, val = fold
            ntrn, nval = len(trn), len(val)
            if verbose:
                txt = (fdx+1,nfolds,ntrn,nval)
                print('train ridge fold  %i/%i: ntrain=%i, nval=%i'%txt)

            if solve_dual is False:
                res = solve_l2_primal(Xtrain[trn], Ytrain[trn],
                                      Xtrain[val], zscore(Ytrain[val]),
                                      ridges, EPS=EPS,
                                      weights=False,
                                      predictions=False,
                                      performance=True,
                                      metric=metric,
                                      verbose=verbose,
                                      )
            else:
                Ktrain = tikutils.fast_indexing(kernel,trn, trn)
                Kval = tikutils.fast_indexing(kernel,val, trn)
                res = solve_l2_dual(Ktrain, Ytrain[trn],
                                    Kval, zscore(Ytrain[val]),
                                    ridges, EPS=EPS,
                                    weights=False,
                                    predictions=False,
                                    performance=True,
                                    metric=metric,
                                    verbose=verbose,
                                    )
            # Store results
            results[fdx,kdx,:,:] = res['performance']
            del(res)

    # We done, otherwise fit and predict the held-out set
    if (predictions is False) and (performance is False) and (weights is False):
        if verbose: print('Duration %0.04f[mins]' % ((time.time()-start_time)/60.))
        return {'cvresults' : results}

    # Find best parameters across responses
    surface = np.nan_to_num(results.mean(0)).mean(-1)

    # find the best point in the 2D space
    max_point = np.where(surface.max() == surface)
    # make sure it's unique (conservative-ish biggest ridge/parameter)
    # BUGFIX: `map(max, ...)` yields a lazy iterator on Python 3; materialize
    # the argmax indices as plain ints before indexing.
    kernmax, ridgemax = [int(max(axis_idx)) for axis_idx in max_point]
    kernopt, ridgeopt = kernel_params[kernmax], ridges[ridgemax]

    if verbose:
        desc = 'held-out' if (Xtest is not None) else 'within'
        outro = 'Predicting {d} set:\ncvperf={cc},ridge={alph},kernel={kn},kernel_param={kp}'
        outro = outro.format(d=desc,cc=surface.max(),alph=ridgeopt,
                             kn=kernel_name,kp=kernopt)
        print(outro)

    if solve_dual:
        # Set the parameter to the optimal
        ktrain_object.update(kernopt, verbose=verbose)

        if Ytest is not None:
            Ytest = zscore(Ytest)

        if Xtest is not None:
            if Li is not None: Xtest = np.dot(Xtest, Li)
            # project test data to kernel
            ktest_object = lazy_kernel(Xtest, Xtrain, kernel_type=kernel_name)
            ktest_object.update(kernopt, verbose=verbose)
            ktest = ktest_object.kernel
        elif withinset_test:
            # predict within set if so desired
            ktest = ktrain_object.kernel
            Ytest = zscore(Ytrain)
        else:
            ktest = None

        fit = solve_l2_dual(ktrain_object.kernel, Ytrain,
                            ktest, Ytest,
                            ridges=[ridgeopt],
                            performance=performance,
                            predictions=predictions,
                            weights=weights,
                            EPS=EPS,
                            metric=metric,
                            verbose=verbose,
                            )

        # Project to linear space if we can
        if (kernel_name == 'linear') and ('weights' in fit) and (not kernel_weights):
            fit['weights'] = np.dot(Xtrain.T, fit['weights'])
    else:
        if Ytest is not None:
            Ytest = zscore(Ytest)

        if Xtest is not None:
            if Li is not None: Xtest = np.dot(Xtest, Li)
        elif withinset_test:
            Xtest = Xtrain
            Ytest = zscore(Ytrain)

        fit = solve_l2_primal(Xtrain, Ytrain,
                              Xtest, Ytest,
                              ridges=[ridgeopt],
                              performance=performance,
                              predictions=predictions,
                              weights=weights,
                              metric=metric,
                              verbose=verbose,
                              EPS=EPS,
                              )

    if (Li is not None) and ('weights' in fit):
        # project back
        fit['weights'] = np.dot(Li, fit['weights'])

    if verbose: print('Duration %0.04f[mins]' % ((time.time()-start_time)/60.))
    fit = clean_results_dict(dict(fit))
    fit['cvresults'] = results
    return fit
def simple_ridge_dual(X, Y, ridge=10.0):
    '''Linear-kernel ridge regression weights via the dual formulation.'''
    gram = np.dot(X, X.T)
    dual_coef = np.dot(np.linalg.inv(gram + ridge*np.eye(gram.shape[0])), Y)
    return np.dot(X.T, dual_coef)
def simple_ridge_primal(X, Y, ridge=10.0):
    '''Ridge regression weights via the primal normal equations.'''
    nfeatures = X.shape[-1]
    regularized_gram = np.dot(X.T, X) + ridge*np.eye(nfeatures)
    return np.dot(np.linalg.inv(regularized_gram), np.dot(X.T, Y))
def simple_generalized_tikhonov(X, Y, L, ridge=10.0):
    '''Generalized Tikhonov regression solved directly in feature space.

    Solves (X'X + ridge*L'L) beta = X'Y.
    '''
    lhs = np.dot(X.T, X) + ridge*np.dot(L.T, L)
    rhs = np.dot(X.T, Y)
    return np.dot(LA.inv(lhs), rhs)
def generalized_tikhonov(X, Y, Li, ridge=10.0):
    '''Tikhonov regression via the standard-form transform (cf. Hansen, 1998).

    Fits ridge regression on the transformed design A = X @ Li, then maps the
    standard-form weights back through Li.
    '''
    A = np.dot(X, Li)
    lhs = np.dot(A.T, A) + ridge*np.identity(A.shape[-1])
    theta = np.dot(LA.inv(lhs), np.dot(A.T, Y))
    return np.dot(Li, theta)
def _generalized_tikhonov_dual(X, Y, Li, ridge=10.0):
'''check kernel representation also works
'''
A = np.dot(X, Li)
AATIi = LA.inv(np.dot(A, A.T) + ridge*np.identity(A.shape[0]))
rlambdas = np.dot(AATIi, Y)
weights = np.dot(A.T, rlambdas)
betas = np.dot(Li, weights)
return betas
def find_optimum_mvn(response_cvmean,
                     temporal_hhparams,
                     spatial_hyparams,
                     ridge_hyparams):
    '''Return the (temporal, spatial, ridge) hyper-parameters at the argmax
    of a 3D cross-validation surface.
    '''
    tdx, sdx, rdx = np.unravel_index(np.argmax(response_cvmean),
                                     response_cvmean.shape)
    return (temporal_hhparams[tdx],
            spatial_hyparams[sdx],
            ridge_hyparams[rdx])
def crossval_stem_wmvnp(features_train,
                        responses_train,
                        ridges=np.logspace(0,3,10),
                        temporal_prior=None,
                        feature_priors=None,
                        population_mean=False,
                        folds=(1,5),
                        method='SVD',
                        verbosity=1,
                        chunklen=True,
                        kernel_features=False,
                        normalize_kernel=False,
                        normalize_hyparams=False,
                        metric=METRIC,
                        zscore_ytrain=False,
                        zscore_yval=False,
                        weights=False,
                        predictions=False,
                        ):
    '''Cross-validation procedure for
    spatio-temporal encoding models with MVN priors.

    Parameters
    ----------
    features_train : list of np.ndarrays, or np.ndarray
        Training set feature spaces, each of shape (n, p_i).
    responses_train : 2D np.ndarray
        Population responses to the training set (n, v).
    ridges : 1D array-like
        Ridge scales to cross-validate.
    temporal_prior : ``TemporalPrior`` object
        Temporal prior; may contain a hyper-prior.
    feature_priors : list of ``SpatialPrior`` objects
        One prior per feature space.
    population_mean : bool
        If True, only the mean performance across responses is stored.
    folds : scalar, tuple (N, K), or list of (trnidx, validx) pairs
        Cross-validation fold specification.
    kernel_features : bool
        If True, ``features_train`` already contains kernels
        (only valid with spherical feature priors).

    Returns
    -------
    out : dict
        Keys: 'cvresults', 'dims', 'spatial', 'ridges', 'temporal',
        and 'cvpreds' if ``predictions`` is True.
    '''
    import time
    start_time = time.time()

    # Normalize inputs up-front. NOTE(fix): the single-ndarray wrap must
    # happen *before* anything iterates ``features_train``; the original
    # wrapped it after the kernel-caching step, which would have iterated
    # the rows of the array instead of the feature spaces.
    if isinstance(features_train, np.ndarray):
        features_train = [features_train]
    if isinstance(verbosity, bool):
        verbosity = 1 if verbosity else 0

    ### optimize solution
    doitfast = [False]*len(features_train)
    for fi, fp in enumerate(feature_priors):
        if (tikutils.isdiag(fp.asarray) and
            np.allclose(np.diag(fp.asarray), fp.asarray[0,0])):
            doitfast[fi] = True

    kernel_estimate = kernel_spatiotemporal_prior
    # check whether we can use faster diagonal method
    if np.allclose(doitfast, True):
        kernel_estimate = kernel_banded_temporal_prior
        if kernel_features is False:
            # cache the kernels
            features_train = [np.dot(X, X.T) for X in features_train]

    nridges = len(ridges)
    delays = temporal_prior.delays
    ndelays = len(delays)
    chunklen = ndelays if (chunklen is True) else (chunklen if chunklen else 1)

    nresponses = responses_train.shape[-1]
    ntrain = responses_train.shape[0]
    kernel_normalizer = 1.0

    #### handle cross-validation folds options
    if isinstance(folds, list):
        # pre-defined folds
        nfolds = len(folds)
    elif np.isscalar(folds):
        # do k-fold cross-validation only once.
        # NOTE(fix): was ``nfolds = (1, folds)`` which left ``folds`` a
        # scalar and crashed in ``list(folds)`` below; rebinding ``folds``
        # lets the tuple branch below generate the folds.
        folds = (1, folds)
    else:
        # do k-fold cross-validation N times
        assert isinstance(folds, tuple)

    if isinstance(folds, tuple):
        # get cv folds (train, val) indeces
        nfolds = folds
        folds = tikutils.generate_trnval_folds(ntrain,
                                               sampler='bcv',
                                               nfolds=nfolds,
                                               nchunks=chunklen)
        nfolds = np.prod(nfolds)
    folds = list(folds)

    # get temporal hyper-prior hyper-parameters from object
    all_temporal_hhparams = [temporal_prior.get_hhparams()]
    # get feature prior hyper parameters
    all_spatial_hyparams= [t.get_hyparams() for t in feature_priors]
    # all combinations
    all_hyperparams = list(itertools.product(*(all_temporal_hhparams + all_spatial_hyparams)))
    nall_cvparams = len(all_hyperparams)

    # count parametres
    ntemporal_hhparams = np.prod([len(t) for t in all_temporal_hhparams])
    nspatial_hyparams = np.prod([len(t) for t in all_spatial_hyparams])

    # store cross-validation performance
    results = np.zeros((nfolds,
                        ntemporal_hhparams,
                        nspatial_hyparams,
                        nridges,
                        1 if population_mean else nresponses),
                       )

    if predictions or weights:
        # count how often each sample appears in a validation set
        # (``np.int`` was removed in NumPy 1.24; use the builtin)
        sample_counter = np.zeros(responses_train.shape[0]).astype(int)
        for ifold, (trnidx, validx) in enumerate(folds):
            sample_counter[validx] += 1
        sample_max = sample_counter.max()

    if predictions:
        results_predictions = np.zeros((sample_max,
                                        ntemporal_hhparams,
                                        nspatial_hyparams,
                                        responses_train.shape[0], # ntimepoints
                                        nridges,
                                        nresponses,
                                        ),
                                       dtype=np.float32,
                                       )
    if weights:
        results_weights = []

    sp_hyparams = []
    scaled_ridges = np.atleast_1d(ridges).copy()

    # start iterating through spatio-temporal hyparams
    for hyperidx, spatiotemporal_hyperparams in enumerate(all_hyperparams):
        sample_counter = np.zeros(responses_train.shape[0]).astype(int)
        # hyperparameters
        temporal_hhparam = spatiotemporal_hyperparams[0]
        spatial_hyparams = spatiotemporal_hyperparams[1:]

        # map hyparams to surface of sphere
        if normalize_hyparams:
            spatial_hyparams /= np.linalg.norm(spatial_hyparams)
        sp_hyparams.append(spatial_hyparams)

        # apply the hyperparameter to the hyper-prior on the temporal prior
        this_temporal_prior = temporal_prior.get_prior(hhparam=temporal_hhparam)

        # get spatial and temporal parameter indeces
        shyperidx = np.mod(hyperidx, nspatial_hyparams)
        thyperidx = int(hyperidx // nspatial_hyparams)

        if verbosity:
            hyperdesc = (hyperidx+1, nall_cvparams,
                         thyperidx+1, ntemporal_hhparams, temporal_hhparam,
                         shyperidx+1, nspatial_hyparams) + tuple(spatial_hyparams)
            hypertxt = "%i/%i: temporal %i/%i=%0.03f, "
            hypertxt += "features %i/%i=(%0.04f, "
            hypertxt += ', '.join(["%0.04f"]*(len(spatial_hyparams)-1)) + ')'
            print(hypertxt % hyperdesc)

        Ktrain = 0.0
        # iterate through feature matrices, priors, and hyparams
        # to construct spatio-temporal kernel for full training set
        for fdx, (fs_train, fs_prior, fs_hyper) in enumerate(zip(features_train,
                                                                 feature_priors,
                                                                 spatial_hyparams)):
            # compute spatio-temporal kernel for this feature space given
            # spatial prior hyparams, and temporal prior hyper-prior hyparams
            kernel_train = kernel_estimate(fs_train,
                                           this_temporal_prior,
                                           fs_prior.get_prior(fs_hyper),
                                           delays=delays)

            # store this feature space spatio-temporal kernel
            Ktrain += kernel_train

        if (normalize_kernel is True) and (hyperidx == 0):
            # normalize all ridges by the determinant of the first kernel
            kernel_normalizer = tikutils.determinant_normalizer(Ktrain)
            if np.allclose(kernel_normalizer, 0):
                # invalid determinant, do not scale
                kernel_normalizer = 1.0
            scaled_ridges *= np.sqrt(kernel_normalizer)
        if kernel_normalizer != 1:
            Ktrain /= kernel_normalizer

        # perform cross-validation procedure
        for ifold, (trnidx, validx) in enumerate(folds):
            # extract training and validation sets from full kernel
            ktrn = tikutils.fast_indexing(Ktrain, trnidx, trnidx)
            kval = tikutils.fast_indexing(Ktrain, validx, trnidx)
            sample_counter[validx] += 1

            if verbosity > 1:
                txt = (ifold+1,nfolds,len(trnidx),len(validx))
                print('train fold %i/%i: ntrain=%i, ntest=%i'%txt)

            # solve the regression problem
            norm_train = zscore if zscore_ytrain else lambda x: x
            norm_val = zscore if zscore_yval else lambda x: x
            fit = solve_l2_dual(ktrn, norm_train(responses_train[trnidx]),
                                kval, norm_val(responses_train[validx]),
                                ridges=ridges,
                                performance=True,
                                verbose=verbosity > 1,
                                method=method,
                                metric=metric,
                                predictions=predictions,
                                weights=weights,
                                )
            if population_mean:
                # only keep mean population performance on the validation set
                cvfold = np.nan_to_num(fit['performance']).mean(-1)[...,None]
            else:
                # keep all individual responses' performance on validation set
                cvfold = fit['performance']
            results[ifold, thyperidx, shyperidx] = cvfold

            if predictions:
                fold_iteration = sample_counter[validx] - 1
                preds = fit['predictions'].swapaxes(0,1) # time, ridges, voxels
                for count in np.unique(fold_iteration):
                    time_mask = fold_iteration == count
                    results_predictions[count, thyperidx, shyperidx, validx] = preds[time_mask]

        if verbosity:
            # print performance for this spatio-temporal hyperparameter set
            # (fix: was bare ``nan_to_num``; qualify with the numpy namespace
            # used everywhere else in this function)
            perf = np.nan_to_num(results[:,thyperidx,shyperidx].mean(0))
            bestperf = np.nanmax(perf, 0) # across ridges explored
            group_ridge = ridges[np.argmax(np.nanmean(perf, -1))] # across population
            contents = (group_ridge, np.mean(perf),
                        np.percentile(bestperf, 25), np.median(bestperf), np.percentile(bestperf, 75),
                        np.sum(bestperf > 0.0), np.sum(bestperf > 0.5))
            txt = "pop.cv.best: %6.03f, mean=%0.04f, (25,50,75)pctl=(%0.04f,%0.04f,%0.04f),"
            txt += "(0.0<r>0.5): (%03i,%03i)"
            print(txt % contents)

    #### dimensions explored (``np.int`` removed in NumPy 1.24 -> builtin int)
    dtype = np.dtype([('nfolds', int),
                      ('ntemporal_hhparams', int),
                      ('nspatial_hyparams', int),
                      ('nridges', int),
                      ('nresponses', int),
                      ('nfspaces', int)])
    dims = np.recarray(shape=(1), dtype=dtype)
    dims[0] = np.asarray([(nfolds,
                           ntemporal_hhparams,
                           nspatial_hyparams,
                           nridges,
                           nresponses,
                           len(features_train))],
                         dtype=dtype)

    # spatial hyparams. all the same across temporal
    sp_hyparams = np.asarray(sp_hyparams)[:nspatial_hyparams]
    if verbosity:
        print('Duration %0.04f[mins]' % ((time.time()-start_time)/60.))

    out = {'cvresults' : results,
           'dims' : dims,
           'spatial' : sp_hyparams,
           'ridges' : scaled_ridges,
           'temporal' : temporal_prior.get_hhparams(),
           }
    if predictions:
        out['cvpreds'] = results_predictions
    return out
def estimate_stem_wmvnp(features_train,
                        responses_train,
                        features_test=None,
                        responses_test=None,
                        ridges=np.logspace(0,3,10),
                        normalize_hyparams=False,
                        normalize_kernel=False,
                        temporal_prior=None,
                        feature_priors=None,
                        weights=False,
                        predictions=False,
                        performance=False,
                        folds=(1,5),
                        method='SVD',
                        verbosity=1,
                        cvresults=None,
                        population_optimal=False,
                        keep_cvfolds=True,
                        chunklen=True,
                        metric=METRIC,
                        ):
    '''Cross-validate and fit a spatio-temporal encoding model with MVN priors.

    Runs (or reuses) the cross-validation from ``crossval_stem_wmvnp``,
    finds the optimum hyperparameters per response (or for the population
    if ``population_optimal``), then fits a final model per unique optimum
    via ``estimate_simple_stem_wmvnp``.

    Parameters
    ----------
    features_train : list of np.ndarrays
        Training set feature spaces each of shape (n, p_i).
    responses_train : 2D np.ndarray (n, v)
    features_test, responses_test : optional test data.
    cvresults : dict or None
        Previously computed cross-validation results; computed if None.
    population_optimal : bool
        Use a single population-wide optimum instead of per-response optima.

    Returns
    -------
    cvresults : dict
        Cross-validation results augmented with 'optima' and (depending
        on flags) 'weights', 'performance', 'predictions'.
    '''
    start_time = time.time()
    delays = temporal_prior.delays
    ndelays = len(delays)

    if features_test is None:
        # keep the per-feature-space structure with None placeholders
        features_test = [features_test]*len(features_train)

    if cvresults is None:
        # find optimal hyperparamters via Nx k-fold cross-validation
        cvresults = crossval_stem_wmvnp(features_train,
                                        responses_train,
                                        ridges=ridges,
                                        normalize_hyparams=normalize_hyparams,
                                        normalize_kernel=normalize_kernel,
                                        temporal_prior=temporal_prior,
                                        feature_priors=feature_priors,
                                        population_mean=population_optimal,
                                        folds=folds,
                                        method=method,
                                        verbosity=verbosity,
                                        chunklen=chunklen,
                                        metric=metric,
                                        )

    if (weights is False) and (performance is False) and (predictions is False):
        # nothing else requested: return the cross-validation results only
        return cvresults

    # find optima across cross-validation folds
    cvmean = cvresults['cvresults'].mean(0)
    if keep_cvfolds is False:
        # clear memory
        cvresults['cvresults'] = cvmean

    if 'dims' in cvresults:
        dims = cvresults['dims']
        nresponses = int(dims.nresponses)
        nfspaces = int(dims.nfspaces)
        ntspaces = 1
    else:
        # infer from data
        nresponses = cvmean.shape[-1]
        nfspaces = len(features_train)
        ntspaces = 1

    if population_optimal is True and (nresponses > 1):
        # collapse across responses to a single population curve
        cvmean = np.nan_to_num(cvmean).mean(-1)[...,None]

    ncvresponses = 1 if population_optimal else nresponses
    # one row per response: (temporal, spatial_1..spatial_k, ridge)
    optima = np.zeros((ncvresponses, nfspaces + ntspaces + 1))
    for idx in range(ncvresponses):
        # find response optima
        temporal_opt, spatial_opt, ridge_opt = find_optimum_mvn(cvmean[...,idx],
                                                                cvresults['temporal'],
                                                                cvresults['spatial'],
                                                                cvresults['ridges'])
        optima[idx] = tuple([temporal_opt])+tuple(spatial_opt)+tuple([ridge_opt])
    cvresults['optima'] = optima # store optima
    unique_optima = np.vstack(set(tuple(row) for row in optima)) # get unique rows

    # estimate solutions
    solutions = [[]]*nresponses
    for idx in range(unique_optima.shape[0]):
        # get hyper parameters
        uopt = unique_optima[idx][0], unique_optima[idx][1:-1], unique_optima[idx][-1]
        temporal_opt, spatial_opt, ridge_opt = uopt

        # fit responses that have this optimum
        if population_optimal:
            train_responses = responses_train
            test_responses = responses_test
        else:
            responses_mask = np.asarray([np.allclose(row, unique_optima[idx]) for row in optima])
            train_responses = responses_train[:, responses_mask]
            test_responses = None if responses_test is None else responses_test[:, responses_mask]

        response_solution = estimate_simple_stem_wmvnp(features_train,
                                                       train_responses,
                                                       features_test=features_test,
                                                       responses_test=test_responses,
                                                       temporal_prior=temporal_prior,
                                                       temporal_hhparam=temporal_opt,
                                                       feature_priors=feature_priors,
                                                       feature_hyparams=spatial_opt,
                                                       weights=weights,
                                                       performance=performance,
                                                       predictions=predictions,
                                                       ridge_scale=ridge_opt,
                                                       verbosity=verbosity,
                                                       method=method,
                                                       metric=metric,
                                                       )
        # store the solutions
        if population_optimal:
            solutions = response_solution
        else:
            # scatter the fitted columns back to their response indices
            for rdx, response_index in enumerate(responses_mask.nonzero()[0]):
                # TODO: project weights to primal space if requested
                solutions[response_index] = {k:v[...,rdx] for k,v in response_solution.items()}

        if verbosity:
            if population_optimal:
                itxt = '%i responses:'%(nresponses)
            else:
                itxt = '%i responses:'%(responses_mask.sum())
            ttxt = "ridge=%9.03f, temporal=%0.03f," % (ridge_opt, temporal_opt)
            stxt = "spatial=("
            stxt += ', '.join(["%0.03f"]*(len(spatial_opt)))
            stxt = stxt%tuple(spatial_opt) + ')'
            if 'performance' in response_solution:
                perf = 'perf=%0.04f'%response_solution['performance'].mean()
            else:
                perf = ''
            print(' '.join([itxt, ttxt, stxt, perf]))

    if population_optimal:
        for k,v in solutions.items():
            cvresults[k] = v
    else:
        # re-assemble per-response solutions into (…, v) arrays
        fits = ddict(list)
        for solution in solutions:
            for k,v in solution.items():
                fits[k].append(v)
        for k,v in fits.items():
            v = np.asarray(v).T
            cvresults[k] = v
        del fits, solutions

    if verbosity:
        print('Total duration %0.04f[mins]' % ((time.time()-start_time)/60.))
    return cvresults
def dual2primal_weights(kernel_weights,
                        features_train,
                        feature_priors,
                        feature_hyparams,
                        temporal_prior,
                        temporal_hhparam=1.0,
                        ):
    '''Recover the feature weights from the kernel weights
    for any one or all feature spaces.

    Parameters
    ----------
    kernel_weights : 2D np.ndarray
    features_train : list of np.ndarrays
        Training set feature spaces each of shape (n, p_i).
    feature_priors : list of ``SpatialPrior`` objects
        One feature prior per feature space.
    feature_hyparams : list of scalars
        Scalar for each feature space prior
    temporal_prior : ``TemporalPrior`` object
        A temporal prior object to use. The temporal
        prior may contain a hyper-prior.
    temporal_hhparam : scalar
        Hyper-prior hyperparameter if required.
        Defaults to 1.0, no effect.

    Returns
    -------
    weights : list
        Feature space weights
        [(p_1, v), ..., (p_l, v)]
    '''
    weights = []
    tp = temporal_prior.get_prior(hhparam=temporal_hhparam)
    delays = temporal_prior.delays

    if isinstance(features_train, np.ndarray):
        print('Only one feature space given...')
        features_train = [features_train]
        assert not isinstance(feature_priors, list)
        assert not isinstance(feature_hyparams, list)
        feature_priors = [feature_priors]
        # FIX: was ``features_hyparams = [feature_hyparams]`` (typo binding
        # an unused name), which left the scalar unwrapped and crashed on
        # ``feature_hyparams[fidx]`` below.
        feature_hyparams = [feature_hyparams]

    for fidx, features in enumerate(features_train):
        Xi = tikutils.delay_signal(features, delays)
        sp = feature_priors[fidx].get_prior(feature_hyparams[fidx])
        if tikutils.isdiag(sp) and tikutils.isdiag(tp):
            # handle simple banded and ridge cases
            Wi = np.dot(Xi.T, kernel_weights)
            Wi *= feature_hyparams[fidx]**-2
        else:
            # full spatio-temporal covariance: Sigma * X' * alpha
            Sigma = np.kron(tp, sp)
            Wi = np.linalg.multi_dot([Sigma, Xi.T, kernel_weights])
        weights.append(Wi)

    if len(weights) == 1:
        # only one feature space given
        weights = weights[0]
    return weights
def dual2primal_weights_banded(kernel_weights,
                               feature_space_train,
                               population_feature_prior,
                               temporal_prior,
                               delays_mean=False,
                               verbose=True):
    '''WIP. WILL CHANGE. USE AT OWN RISK.

    Project dual (kernel) weights to primal feature weights, one temporal
    delay at a time, assuming a banded feature prior.

    Parameters
    ----------
    kernel_weights : np.ndarray
        Dual coefficients, one column per response.
    feature_space_train : 2D np.ndarray
        Training feature matrix (n, p).
    population_feature_prior : array-like
        Per-response scaling applied to the dual weights via
        ``tikutils.mult_diag`` (presumably voxel-wise lambda scales —
        confirm against ``tikutils.mult_diag``).
    temporal_prior : ``TemporalPrior`` object
        Provides ``delays`` and the prior matrix ``asarray``.
    delays_mean : bool
        If True, average the primal weights across delays;
        otherwise stack one weight matrix per delay.
    verbose : bool
        Unused in this implementation; kept for API compatibility.

    Returns
    -------
    np.ndarray
        Primal weights; (p, v) if ``delays_mean`` else (ndelays, p, v).
    '''
    # normalize population alphas by voxel-wise lambda scale
    alpha = tikutils.mult_diag(kernel_weights, population_feature_prior, left=False)
    primal_weights = 0.0 if delays_mean else []
    ndelays = float(len(temporal_prior.delays))
    for idx, idelay in enumerate(temporal_prior.delays):
        print('Working on delay %i (%i/%i)'%(idelay, idx+1, ndelays))
        delay_weights = np.zeros_like(feature_space_train).T
        for jdx, jdelay in enumerate(temporal_prior.delays):
            if temporal_prior.asarray[idx, jdx] == 0:
                # no temporal coupling between these two delays
                continue
            Xdj = tikutils.delay_signal(feature_space_train, [jdelay])
            delay_weights += temporal_prior.asarray[idx, jdx]*Xdj.T
        # project to primal space
        weights = np.dot(delay_weights, alpha)
        if delays_mean:
            primal_weights += weights/ndelays
        else:
            primal_weights.append(weights)
    return np.asarray(primal_weights)
def estimate_simple_stem_wmvnp(features_train,
                               responses_train,
                               features_test=None,
                               responses_test=None,
                               feature_priors=None,
                               feature_hyparams=None,
                               temporal_prior=None,
                               temporal_hhparam=1.0,
                               ridge_scale=1.0,
                               weights=False,
                               performance=False,
                               predictions=False,
                               kernel_features=False,
                               method='SVD',
                               verbosity=2,
                               metric=METRIC,
                               ):
    '''Estimate model with given hyper-parameters

    Parameters
    ----------
    features_train : list of np.ndarrays
        Training set feature spaces each of shape (n, p_i).
    responses_train : 2D np.ndarray
        Population responses to the training set (n, v).
    features_test : list of np.ndarrays
        Test set feature spaces each of shape (m, p_i)
    responses_test : 2D np.ndarray
        Population responses to the test set (m, v)
    temporal_prior : ``TemporalPrior`` object
        A temporal prior object to use. The temporal
        prior may contain a hyper-prior.
    temporal_hhparam : scalar
        Hyper-prior hyperparameter if required.
        Defaults to 1.0, no effect.
    feature_priors : list of ``SpatialPrior`` objects
        One feature prior per feature space.
    feature_hyparams : list of scalars
        Scalar for each feature space prior
    ridge_scale : scalar
        Scalar on the feature prior hyper-parameters.
        Defaults to 1.0, no effect
    weights : bool
        Compute weights
    performance : bool
        Compute accuracy of predictions against test set responses
    predictions : bool
        Test set prediction time courses
    kernel_features : bool
        If True, ``features_train`` and ``features_test``
        is a list of kernels, one per feature space.
        This is only allowed if ``feature_priors`` are spherical.
    method : str {"SVD", "Chol"}
        Solver to use

    Returns
    -------
    fit : dictionary
        Estimated model dictionary with keys:
        * weights : [(p_1, v), ..., (p_l, v)]
        * performance : (1, v)
        * predictions : (m, v)
    '''
    if feature_hyparams is None:
        feature_hyparams = [1.0]*len(features_train)

    # we're only using one set in this function
    assert len(feature_hyparams) == len(features_train)

    kernel_estimate = kernel_spatiotemporal_prior
    ### optimize solution
    if kernel_features is True:
        doitfast = [False]*len(features_train)
        for fi, fp in enumerate(feature_priors):
            if (tikutils.isdiag(fp.asarray) and
                np.allclose(np.diag(fp.asarray), fp.asarray[0,0])):
                doitfast[fi] = True
        # kernels only allowed with banded feature priors
        assert np.allclose(doitfast, True)
        kernel_estimate = kernel_banded_temporal_prior

    if features_test is None:
        # keep the per-feature-space structure with None placeholders
        features_test = [features_test]*len(features_train)

    Ktrain = 0.
    Ktest = 0.
    this_temporal_prior = temporal_prior.get_prior(hhparam=temporal_hhparam)
    for fdx, (fs_train, fs_test, fs_prior, fs_hyper) in enumerate(zip(features_train,
                                                                      features_test,
                                                                      feature_priors,
                                                                      feature_hyparams)):
        Ktrain += kernel_estimate(fs_train,
                                  this_temporal_prior,
                                  fs_prior.get_prior(fs_hyper),
                                  delays=temporal_prior.delays)

        if fs_test is not None:
            if kernel_features:
                # fs_test is already test kernel
                Ktest += kernel_estimate(fs_test,
                                         this_temporal_prior,
                                         fs_prior.get_prior(fs_hyper),
                                         delays=temporal_prior.delays,
                                         )
            else:
                Ktest += kernel_estimate(fs_train,
                                         this_temporal_prior,
                                         fs_prior.get_prior(fs_hyper),
                                         delays=temporal_prior.delays,
                                         Xtest=fs_test)

    if all(fs_test is None for fs_test in features_test):
        # FIX: the original checked ``features_test is None`` here, but
        # ``features_test`` was converted to a list of ``None`` above, so
        # that check could never fire and ``Ktest`` stayed 0.0 instead of
        # signaling "no test set" to the solver.
        Ktest = None

    # solve for this response
    response_solution = solve_l2_dual(Ktrain, responses_train,
                                      Ktest=Ktest,
                                      Ytest=responses_test,
                                      ridges=[ridge_scale],
                                      performance=performance,
                                      predictions=predictions,
                                      weights=weights,
                                      verbose=verbosity > 1,
                                      metric=metric,
                                      method=method)
    # TODO: map weights from dual to primal space
    return response_solution
class counter(object):
    '''Minimal mutable call counter.'''

    def __init__(self):
        # number of times ``update`` has been called
        self.count = 0

    def update(self):
        '''Increment the counter by one.'''
        self.count = self.count + 1
def hyperopt_crossval_stem_wmvnp(features_train,
                                 responses_train,
                                 features_test=None,
                                 responses_test=None,
                                 temporal_prior=None,
                                 feature_priors=None,
                                 spatial_sampler=True,
                                 temporal_sampler=False,
                                 ridge_sampler=False,
                                 population_optimal=False,
                                 folds=(1,5),
                                 method='SVD',
                                 ntrials=100,
                                 verbosity=1,
                                 dumpcrossval=False,
                                 normalize_hyparams=False,
                                 normalize_kernel=False,
                                 weights=False,
                                 predictions=False,
                                 performance=True,
                                 metric=METRIC,
                                 zscore_ytrain=True,
                                 zscore_yval=True,
                                 search_algorithm='tpe',
                                 trials=None,
                                 **kwargs):
    '''Use ``hyperopt`` to cross-validate all hyper-parameters parameters.

    Search the hyper-parameter space to find the population optimum using
    a cross-validation procedure.

    Parameters
    ----------
    features_train : list of np.ndarrays
        The feature spaces of shape (n, p_i).
    responses_train : 2D np.ndarray
        The population responses to fit (n, v).
    temporal_prior : ``TemporalPrior`` object
        A temporal prior object to use. The temporal
        prior may contain a hyper-prior.
    feature_priors : list of ``SpatialPrior`` objects
        One feature prior per feature space.
    spatial_sampler : ``hyperopt.hp``, or bool
        Specifies how to sample the hyperparameter space.
        Defaults to hp.loguniform(0,7).
    temporal_sampler : ``hyperopt.hp``, or bool
        Used iff ``temporal_prior`` has a hyper-prior set.
    ridge_sampler : ``hyperopt.hp``, or bool
        Defaults to False.
        Use this with caution. Specifies how to sample
        the scaling on the spatial hyperparameters. However,
        Specifying a ``spatial_sampler`` for all feature
        spaces and a ``ridge_sampler`` is redundant.
    population_optimal : bool
        If True, individual response cross-validation values
        are not kept. Only the mean across responses is stored.
    folds : tuple (N,K), or list of tuples [(trn1, val1),..., (trnK, valK)]
        If tuple, the second element corresponds to the number
        of cross-validation folds. The first element determines
        how many times to repeat the cross validation.
        (1,5) is standard 5-folds cross-validation.
        (10,5) performs 5-fold cross-validation 10 times.
    method : str ("SVD", "Chol")
        Solver to use
    ntrials : int
        Number of ``hyperopt`` iterations
    verbosity : int (1, 2)
        Level of print statements
    dumpcrossval : function(iteration_num, crossval_dict)
        Save the cross-validation results for every iteration.
        The function takes the iteration number (int) and a
        dictionary containing the cross-validation results.
        This is useful for finding the optimum hyper-parameters
        for each response. The stored data contains all the info.
        Defaults to False.
    kwargs : dict
        Additional arguments passed to ``crossval_stem_wmvnp``.

    Returns
    -------
    crossval_results : hyperopt.Trials object
        Contains the cross-validation results from hyperopt.
    '''
    import pickle
    from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
    if search_algorithm == 'tpe':
        search_algorithm = tpe
    elif search_algorithm in ['random', 'rand']:
        try:
            from hyperopt import rand as random
        except ImportError:
            from hyperopt import random
        search_algorithm = random
    else:
        raise ValueError('Unknown hyperopt search algortihm: %s'%search_algorithm)

    delays = temporal_prior.delays
    ndelays = len(delays)

    # build the list of hyperopt sampling spaces: spatial first,
    # then (optionally) ridge, then (optionally) temporal
    spaces = []
    has_spatial = True
    if (spatial_sampler is True) or (spatial_sampler is None):
        for i in range(len(features_train)):
            sampler = hp.loguniform('X%0i_hyparam'%(i+1), -7, 7)
            spaces.append(sampler)
    else:
        for ss in spatial_sampler:
            spaces.append(ss)

    if ridge_sampler is True:
        has_ridge = True
        # FIX: the original called ``ridge_sampler('ridge_scale', 0, 7)``,
        # but ``ridge_sampler`` is the boolean True in this branch and is
        # not callable. Use the default log-uniform sampler, mirroring the
        # spatial samplers above.
        spaces.append(hp.loguniform('ridge_scale', 0, 7))
    elif ridge_sampler is False:
        has_ridge = False
    else:
        has_ridge = True
        spaces.append(ridge_sampler)

    if (len(temporal_prior.get_hhparams()) > 1) and (temporal_sampler is True):
        has_temporal = True
        spaces.append(hp.uniform('temporal_hhparam', 0, 10))
    elif temporal_sampler is False:
        has_temporal = False
    else:
        # append given temporal sampler
        has_temporal = True
        spaces.append(temporal_sampler)

    if features_test is None:
        features_test = [features_test]*len(features_train)

    def objective(params):
        # one cross-validation run for a single hyperparameter sample
        mcounter.update() # 1-indexed
        if has_spatial and has_ridge and has_temporal:
            parameters = {'temporal' : params[-1],
                          'ridge' : params[-2],
                          'spatial' : params[:-2]}
        elif has_spatial and has_ridge and not has_temporal:
            parameters = {'temporal' : 1.0,
                          'ridge' : params[-1],
                          'spatial' : params[:-1]}
        elif has_spatial and has_temporal and not has_ridge:
            parameters = {'temporal' : params[-1],
                          'ridge' : 1.0,
                          'spatial' : params[:-1]}
        elif has_spatial and (not has_temporal) and (not has_ridge):
            parameters = {'temporal' : 1.0,
                          'ridge' : 1.0,
                          'spatial' : params}
        else:
            print(params)
            raise ValueError('invalid hyperparams')

        temporal_prior.set_hhparameters(parameters['temporal'])
        for fi, feature_prior in enumerate(feature_priors):
            feature_prior.set_hyparams(parameters['spatial'][fi])

        res = crossval_stem_wmvnp(features_train,
                                  responses_train,
                                  ridges=[parameters['ridge']],
                                  normalize_hyparams=normalize_hyparams,
                                  normalize_kernel=normalize_kernel,
                                  temporal_prior=temporal_prior,
                                  feature_priors=feature_priors,
                                  population_mean=population_optimal,
                                  folds=folds,
                                  method=method,
                                  verbosity=verbosity,
                                  metric=metric,
                                  zscore_ytrain=zscore_ytrain,
                                  zscore_yval=zscore_yval,
                                  **kwargs)

        print(params)
        # hyperopt minimizes, so negate the mean CV performance
        cvres = np.nan_to_num(res['cvresults'].mean(0)).mean(-1).mean()
        loss = -1*cvres
        res['cvresults'] = res['cvresults'].astype(np.float32)

        if dumpcrossval:
            # if given, takes the iteration number
            # and the crossvalidation data
            dumpcrossval(mcounter.count, res)

        print('iteration #%i'%mcounter.count)
        print('features:', parameters['spatial'], res['spatial'])
        print('ridges:', parameters['ridge'], res['ridges'])
        print('temporal', parameters['temporal'], res['temporal'])
        print((res['spatial'], res['temporal'], res['ridges']))
        print('perf: %0.05f / loss: %0.05f'%(cvres, loss))

        return {'loss' : loss,
                'attachments' : {'internals' : pickle.dumps({'temporal' : res['temporal'],
                                                             'spatial' : res['spatial'],
                                                             'ridges' : res['ridges'],
                                                             }),
                                 },
                'status': STATUS_OK,
                }

    mcounter = counter()
    if trials is None:
        trials = Trials()
    if len(trials.trials):
        # resume a previous search from the next trial id
        current = np.max([t['tid'] for t in trials.trials]) + 1
        print('Restarting hyperopt search from trial #%i'%current)
        mcounter.count = current

    best_params = fmin(objective,
                       space=spaces,
                       algo=search_algorithm.suggest,
                       max_evals=ntrials,
                       trials=trials)
    print(best_params)
    return trials
def featurespace_dual2primal(kernel_weights,
                             feature_space,
                             feature_prior,
                             feature_hyparam,
                             temporal_prior,
                             temporal_hhparam=1.0,
                             ):
    '''Build per-delay primal feature projections for one feature space.

    NOTE(review): the original body referenced undefined names
    (``spatial_prior``, ``matrix_mult``, ``xf``) and indexed the
    ``temporal_prior`` object directly, so it raised ``NameError`` on any
    call. This reconstruction follows the pattern of
    ``dual2primal_weights_banded`` (``temporal_prior.asarray`` for the
    prior matrix, ``np.dot`` for the products) — confirm against callers.
    ``kernel_weights`` is accepted for API compatibility but, as in the
    original, is not used here.

    Returns
    -------
    weights : list of np.ndarrays
        One prior-projected, delayed feature matrix per delay.
    '''
    delays = temporal_prior.delays
    # spatial prior matrix for this feature space at the given hyperparameter
    spatial_prior = feature_prior.get_prior(feature_hyparam)
    # temporal prior matrix (applying the hyper-prior hyperparameter)
    temporal_matrix = temporal_prior.get_prior(hhparam=temporal_hhparam)
    weights = []
    for jdx, jdelay in enumerate(delays):
        Xf = 0
        for idx, idelay in enumerate(delays):
            if temporal_matrix[idx, jdx] == 0:
                # no temporal coupling between these delays
                continue
            Xd = feature_space[tikutils.delay2slice(idelay)]
            Xf += temporal_matrix[jdx, idx]*np.dot(Xd, spatial_prior)
        # FIX: the original appended the undefined name ``xf``
        weights.append(Xf)
    return weights
def voxelwise_weights2preds(kernel_weights,
                            kernel_test,
                            responses_test,
                            feature_prior,
                            feature_hyparam,
                            temporal_prior,
                            temporal_hyparam=1.0,
                            verbose=True,
                            metric=METRIC):
    '''Compute test-set performance from voxel-wise dual weights.

    feature_prior : one per voxel!
    temporal_prior : one for all voxels

    Parameters
    ----------
    kernel_weights : 2D np.ndarray
        Dual coefficients, one column per response.
    kernel_test : np.ndarray
        Test-set feature kernel.
    responses_test : 2D np.ndarray (m, v)
    feature_hyparam : 1D array-like
        Per-response spatial hyperparameter (responses sharing a value
        are evaluated together).
    metric : str {'correlation', 'rsquared'}
        Performance metric to use.

    Returns
    -------
    np.ndarray
        Per-response performance values.
    '''
    if metric == 'correlation':
        performance_metric = tikutils.columnwise_correlation
    elif metric == 'rsquared':
        performance_metric = tikutils.columnwise_rsquared
    else:
        # FIX: the original constructed the ValueError without raising it,
        # silently falling through with ``performance_metric`` undefined.
        raise ValueError('Unknown metric: %s'%metric)

    nresponses = kernel_weights.shape[-1]
    unique_optima = np.unique(feature_hyparam)

    # estimate solutions, grouping responses by shared hyperparameter
    solutions = [[]]*nresponses
    for idx in range(unique_optima.shape[0]):
        # get hyper parameters
        spatial_opt = unique_optima[idx]

        responses_mask = np.where(feature_hyparam == spatial_opt, True, False)
        test_responses = responses_test[:, responses_mask]
        test_weights = kernel_weights[:, responses_mask]

        ktst = kernel_banded_temporal_prior(kernel_test,
                                            temporal_prior.get_prior(temporal_hyparam),
                                            feature_prior.get_prior(spatial_opt),
                                            delays=temporal_prior.delays)
        fs_pred = np.dot(ktst, test_weights)
        fs_cc = performance_metric(fs_pred, test_responses, zscoreb=False)
        if idx % 10 == 0:
            print(idx, unique_optima.shape[0], np.nan_to_num(fs_cc).mean(), fs_cc.shape)
        for rdx, response_index in enumerate(responses_mask.nonzero()[0]):
            # TODO: project weights to primal space if requested
            solutions[response_index] = fs_cc[...,rdx].squeeze()
    return np.asarray(solutions)
def hyperopt_trials2cvperf(Trials):
    '''Extract hyperparameters and CV performance from a hyperopt ``Trials``.

    Unpickles the ``internals`` attachment of every trial and stacks
    the (spatial, ridge, temporal) hyperparameters per trial.

    Returns
    -------
    hyparams : 2D np.ndarray (ntrials, nhyparams)
    corrs : 2D np.ndarray (ntrials, 1)
        Negated hyperopt losses (i.e. CV performance).
    '''
    import pickle
    all_hyparams = []
    for trial_index in range(len(Trials)):
        raw = Trials.trial_attachments(Trials.trials[trial_index])['internals']
        internals = pickle.loads(raw)
        row = np.c_[internals['spatial'],
                    internals['ridges'],
                    internals['temporal']]
        all_hyparams.append(row.squeeze())
    performance = np.asarray(Trials.losses()) * -1
    return np.asarray(all_hyparams), performance[..., None]
def hyperopt_estimate_stem_wmvnp(features_train,
                                 responses_train,
                                 cvmean,
                                 hyparams,
                                 features_test=None,
                                 responses_test=None,
                                 temporal_prior=None,
                                 feature_priors=None,
                                 spatial_sampler=True,
                                 temporal_sampler=False,
                                 ridge_sampler=False,
                                 population_optimal=False,
                                 method='SVD',
                                 verbosity=1,
                                 normalize_hyparams=False,
                                 normalize_kernel=False,
                                 weights=False,
                                 predictions=False,
                                 performance=True,
                                 kernel_features=False,
                                 metric=METRIC,
                                 **kwargs):
    '''Fit final models from a hyperopt hyperparameter search.

    Given per-trial mean CV performance (``cvmean``) and the matching
    hyperparameter rows (``hyparams``, one row per trial laid out as
    spatial..., ridge, temporal), finds the best trial per response
    (or for the population) and fits a model per unique optimum via
    ``estimate_simple_stem_wmvnp``.

    Returns
    -------
    results : dict
        Contains 'optima' plus (depending on flags) 'weights',
        'performance', 'predictions' re-assembled across responses.
    '''
    ## find optima across cross-validation folds
    nresponses = cvmean.shape[-1]
    results = {}

    if population_optimal is True and (nresponses > 1):
        cvmean = np.nan_to_num(cvmean).mean(-1)[...,None]

    # hyparams rows are laid out as (spatial..., ridge, temporal)
    nfspaces = len(hyparams[0][:-2])
    ntspaces = 1
    nrspaces = 1

    ncvresponses = 1 if population_optimal else nresponses
    optima = np.zeros((ncvresponses, nfspaces + ntspaces + nrspaces))
    optima_cvmean = ddict(list)
    for idx in range(ncvresponses):
        # find response optima
        odx = np.argmax(cvmean[...,idx])
        ohyparams = hyparams[odx]
        temporal_opt = ohyparams[-1]
        ridge_opt = ohyparams[-2]
        spatial_opt = ohyparams[:-2]
        toptima = tuple([temporal_opt])+tuple(spatial_opt)+tuple([ridge_opt])
        optima[idx] = toptima
        optima_cvmean[toptima].append(cvmean[odx,idx])
    results['optima'] = optima # store optima
    optima_cvmean = {k:np.mean(v) for k,v in optima_cvmean.items()}
    unique_optima = np.vstack(set(tuple(row) for row in optima))
    unique_cvmean = [optima_cvmean[tuple(urow)] for urow in unique_optima]
    # visit unique optima in order of decreasing CV performance
    unique_sorted = np.argsort(unique_cvmean)[::-1]
    print('%i unique solutions'%len(unique_sorted))

    # estimate solutions
    solutions = [[]]*nresponses
    for idx in unique_sorted:#range(unique_optima.shape[0]):
        # get hyper parameters
        uopt = unique_optima[idx][0], unique_optima[idx][1:-1], unique_optima[idx][-1]
        temporal_opt, spatial_opt, ridge_opt = uopt

        # fit responses that have this optimum
        if population_optimal:
            train_responses = responses_train
            test_responses = responses_test
        else:
            responses_mask = np.asarray([np.allclose(row, unique_optima[idx]) for row in optima])
            train_responses = responses_train[:, responses_mask]
            test_responses = None if responses_test is None else responses_test[:, responses_mask]

        response_solution = estimate_simple_stem_wmvnp(features_train,
                                                       train_responses,
                                                       features_test=features_test,
                                                       responses_test=test_responses,
                                                       temporal_prior=temporal_prior,
                                                       temporal_hhparam=temporal_opt,
                                                       feature_priors=feature_priors,
                                                       feature_hyparams=spatial_opt,
                                                       weights=weights,
                                                       performance=performance,
                                                       predictions=predictions,
                                                       ridge_scale=ridge_opt,
                                                       verbosity=verbosity,
                                                       method=method,
                                                       kernel_features=kernel_features,
                                                       metric=metric,
                                                       )

        # store the solutions
        if population_optimal:
            solutions = response_solution
        else:
            # scatter the fitted columns back to their response indices
            for rdx, response_index in enumerate(responses_mask.nonzero()[0]):
                # TODO: project weights to primal space if requested
                solutions[response_index] = {k:v[...,rdx] for k,v in response_solution.items()}

        if verbosity:
            print(idx)
            if population_optimal:
                itxt = '%i responses:'%(nresponses)
            else:
                itxt = '%i responses:'%(responses_mask.sum())
            ttxt = "ridge=%9.03f, temporal=%0.03f," % (ridge_opt, temporal_opt)
            stxt = "spatial=("
            stxt += ', '.join(["%0.03f"]*(len(spatial_opt)))
            stxt = stxt%tuple(spatial_opt) + ')'
            perf = 'perf=%0.04f'%response_solution['performance'].mean()
            print(' '.join([itxt, ttxt, stxt, perf]))

    if population_optimal:
        for k,v in solutions.items():
            results[k] = v
    else:
        # re-assemble per-response solutions into (…, v) arrays
        fits = ddict(list)
        for solution in solutions:
            for k,v in solution.items():
                fits[k].append(v)
        for k,v in fits.items():
            v = np.asarray(v).T
            results[k] = v
        del fits, solutions
    return results
if __name__ == '__main__':
    pass  # library module; nothing to do when executed directly
| 2.125 | 2 |
tests/unit/test_cropping.py | LukasBommes/PV-Drone-Inspect | 5 | 12766325 | import unittest
import cv2
import numpy as np
from extractor.cropping import clip_to_image_region, \
crop_module, build_merged_index
# Ground-truth module detections keyed by (module UUID, frame name, mask name).
# Each entry holds the detected quadrilateral (four [x, y] image corners) and
# its sub-pixel center coordinate.
quadrilaterals = {
    ('e3e70682-c209-4cac-a29f-6fbed82c07cd',
     'frame_000000',
     'mask_000000'): {
        'quadrilateral': [
            [424, 279],
            [499, 280],
            [499, 327],
            [421, 323]
        ],
        'center': (
            460.95042812077514,
            302.4197085774373
        )
    },
    ('f728b4fa-4248-4e3a-8a5d-2f346baa9455',
     'frame_000000',
     'mask_000001'): {
        'quadrilateral': [
            [425, 326],
            [499, 326],
            [499, 377],
            [425, 372]
        ],
        'center': (
            462.13331381447324,
            350.2644805543356
        )
    },
    ('eb1167b3-67a9-4378-bc65-c1e582e2e662',
     'frame_000000',
     'mask_000002'): {
        'quadrilateral': [
            [164, 358],
            [233, 363],
            [233, 412],
            [164, 408]
        ],
        'center': (
            198.48300673606857,
            385.4114104919371
        )
    },
    ('f7c1bd87-4da5-4709-9471-3d60c8a70639',
     'frame_000000',
     'mask_000003'): {
        'quadrilateral': [
            [425, 234],
            [497, 231],
            [501, 279],
            [421, 278]
        ],
        'center': (
            461.41970207121716,
            255.7820630547903
        )
    },
    ('e443df78-9558-467f-9ba9-1faf7a024204',
     'frame_000000',
     'mask_000004'): {
        'quadrilateral': [
            [425, 94],
            [498, 90],
            [502, 136],
            [425, 142]
        ],
        'center': (
            462.19730041647847,
            115.55311355311355
        )
    }
}
class TestCropping(unittest.TestCase):
    """Unit tests for extractor.cropping.

    Covers clip_to_image_region (quad clipping against image bounds),
    build_merged_index (mapping every track id to its merge representative,
    using the module-level `quadrilaterals` fixture), and crop_module
    (perspective cropping of module patches from the binary fixtures under
    tests/unit/data/).
    """

    def test_clip_to_image_region_no_clip(self):
        """A quad fully inside the image is returned unchanged."""
        quad = np.array([
            [[424, 279]],
            [[499, 280]],
            [[499, 327]],
            [[421, 323]]
        ])
        image_width = 640
        image_height = 512
        quad_clipped_gt = quad
        quad_clipped = clip_to_image_region(
            np.copy(quad), image_width, image_height)
        self.assertTrue(
            np.allclose(
                quad_clipped,
                quad_clipped_gt
            )
        )

    def test_clip_to_image_region_clip_max(self):
        """All corners beyond the image extent collapse to (w-1, h-1)."""
        quad = np.array([
            [[424, 279]],
            [[499, 280]],
            [[499, 327]],
            [[421, 323]]
        ])
        image_width = 300
        image_height = 200
        quad_clipped_gt = np.array([
            [[299, 199]],
            [[299, 199]],
            [[299, 199]],
            [[299, 199]]
        ])
        quad_clipped = clip_to_image_region(
            np.copy(quad), image_width, image_height)
        self.assertTrue(
            np.allclose(
                quad_clipped,
                quad_clipped_gt
            )
        )

    def test_clip_to_image_region_clip_min(self):
        """Negative coordinates are clipped up to zero."""
        quad = np.array([
            [[ -1,  -1]],
            [[100,  -1]],
            [[100, 100]],
            [[ -1, 100]]
        ])
        image_width = 200
        image_height = 200
        quad_clipped_gt = np.array([
            [[  0,   0]],
            [[100,   0]],
            [[100, 100]],
            [[  0, 100]]
        ])
        quad_clipped = clip_to_image_region(
            np.copy(quad), image_width, image_height)
        self.assertTrue(
            np.allclose(
                quad_clipped,
                quad_clipped_gt
            )
        )

    def test_build_merged_index_merged_none(self):
        """With merged_modules=None every track id maps to itself."""
        merged_modules = None
        merged_index_gt = {
            'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
            'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
        }
        merged_index = build_merged_index(merged_modules, quadrilaterals)
        self.assertEqual(merged_index, merged_index_gt)

    def test_build_merged_index_merged_empty(self):
        """An empty merge list behaves the same as None: identity mapping."""
        merged_modules = []
        merged_index_gt = {
            'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
            'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
        }
        merged_index = build_merged_index(merged_modules, quadrilaterals)
        self.assertEqual(merged_index, merged_index_gt)

    def test_build_merged_index_pair_merged(self):
        """Members of a merged pair map to the pair's first (representative) id."""
        merged_modules = [[
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639'
        ]]
        merged_index_gt = {
            'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
            'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
        }
        merged_index = build_merged_index(merged_modules, quadrilaterals)
        self.assertEqual(merged_index, merged_index_gt)

    def test_build_merged_index_triplet_merged(self):
        """A three-way merge maps all three ids to the first of the group."""
        merged_modules = [[
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'e3e70682-c209-4cac-a29f-6fbed82c07cd'
        ]]
        merged_index_gt = {
            'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'e443df78-9558-467f-9ba9-1faf7a024204': 'e443df78-9558-467f-9ba9-1faf7a024204',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
        }
        merged_index = build_merged_index(merged_modules, quadrilaterals)
        self.assertEqual(merged_index, merged_index_gt)

    def test_build_merged_index_two_pairs_merged(self):
        """Two independent merge groups each keep their own representative."""
        merged_modules = [
            ['f728b4fa-4248-4e3a-8a5d-2f346baa9455',
             'f7c1bd87-4da5-4709-9471-3d60c8a70639'],
            ['e3e70682-c209-4cac-a29f-6fbed82c07cd',
             'e443df78-9558-467f-9ba9-1faf7a024204']
        ]
        merged_index_gt = {
            'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
            'e443df78-9558-467f-9ba9-1faf7a024204': 'e3e70682-c209-4cac-a29f-6fbed82c07cd',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'eb1167b3-67a9-4378-bc65-c1e582e2e662',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f728b4fa-4248-4e3a-8a5d-2f346baa9455'
        }
        merged_index = build_merged_index(merged_modules, quadrilaterals)
        self.assertEqual(merged_index, merged_index_gt)

    def test_build_merged_index_all_merged(self):
        """Merging every module maps all ids onto the single group leader."""
        merged_modules = [[
            'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455',
            'e3e70682-c209-4cac-a29f-6fbed82c07cd',
            'e443df78-9558-467f-9ba9-1faf7a024204',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662',
        ]]
        merged_index_gt = {
            'e3e70682-c209-4cac-a29f-6fbed82c07cd': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'e443df78-9558-467f-9ba9-1faf7a024204': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'f7c1bd87-4da5-4709-9471-3d60c8a70639': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'eb1167b3-67a9-4378-bc65-c1e582e2e662': 'f7c1bd87-4da5-4709-9471-3d60c8a70639',
            'f728b4fa-4248-4e3a-8a5d-2f346baa9455': 'f7c1bd87-4da5-4709-9471-3d60c8a70639'
        }
        merged_index = build_merged_index(merged_modules, quadrilaterals)
        self.assertEqual(merged_index, merged_index_gt)

    def test_crop_modules_real_data(self):
        """Cropping a real frame reproduces the stored ground-truth patch."""
        frame_file = "tests/unit/data/frame_000000.tiff"
        frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
        quad = np.array([
            [[424, 279]],
            [[499, 280]],
            [[499, 327]],
            [[421, 323]]
        ])
        patch_file = "tests/unit/data/frame_000000_mask_000000.tiff"
        patch_gt = cv2.imread(patch_file, cv2.IMREAD_ANYDEPTH)
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=None,
            crop_aspect=None,
            rotate_mode="portrait"
        )
        self.assertTrue(np.allclose(patch, patch_gt))

    def test_crop_modules_crop_full_frame(self):
        """Cropping the full-frame quad is (near) identity: patch equals the
        frame (minus the last row/column) and the homography is the identity."""
        frame_file = "tests/unit/data/frame_000000.tiff"
        frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
        quad = np.array([
            [[0, 0]],
            [[640, 0]],
            [[640, 512]],
            [[0, 512]]
        ])
        patch, homography = crop_module(
            frame,
            quad,
            crop_width=None,
            crop_aspect=None,
            rotate_mode="landscape"
        )
        self.assertTrue(np.allclose(patch, frame[0:-1, 0:-1]))
        self.assertTrue(np.allclose(homography, np.eye(3)))

    def test_crop_modules_portrait_vs_landscape(self):
        """rotate_mode swaps the patch orientation; None keeps landscape."""
        frame_file = "tests/unit/data/frame_000000.tiff"
        frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
        quad = np.array([
            [[424, 279]],
            [[499, 280]],
            [[499, 327]],
            [[421, 323]]
        ])
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=None,
            crop_aspect=None,
            rotate_mode="portrait"
        )
        self.assertEqual(patch.shape, (78, 47))
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=None,
            crop_aspect=None,
            rotate_mode="landscape"
        )
        self.assertEqual(patch.shape, (47, 78))
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=None,
            crop_aspect=None,
            rotate_mode=None
        )
        # NOTE(review): with rotate_mode=None the patch currently comes out
        # landscape for this quad — confirm this is the intended behavior.
        self.assertEqual(patch.shape, (47, 78))

    def test_crop_modules_crop_width_and_aspect(self):
        """crop_width fixes the long side; crop_aspect fixes the short/long ratio."""
        frame_file = "tests/unit/data/frame_000000.tiff"
        frame = cv2.imread(frame_file, cv2.IMREAD_ANYDEPTH)
        quad = np.array([
            [[424, 279]],
            [[499, 280]],
            [[499, 327]],
            [[421, 323]]
        ])
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=50,
            crop_aspect=0.625,  # 1/1.6
            rotate_mode="portrait"
        )
        self.assertEqual(patch.shape, (50, 31))
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=50,
            crop_aspect=1,
            rotate_mode="portrait"
        )
        self.assertEqual(patch.shape, (50, 50))
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=50,
            crop_aspect=0.625,  # 1/1.6
            rotate_mode="landscape"
        )
        self.assertEqual(patch.shape, (31, 50))
        patch, _ = crop_module(
            frame,
            quad,
            crop_width=300,
            crop_aspect=0.625,  # 1/1.6
            rotate_mode="portrait"
        )
        # Extraction artifact ("| 2.234375 | 2") removed from this line; it
        # previously turned the assertion into `None | float` -> TypeError.
        self.assertEqual(patch.shape, (300, 187))
Utils/UserGroups.py | Mihai925/EduCoding-Legacy | 0 | 12766326 | <reponame>Mihai925/EduCoding-Legacy<filename>Utils/UserGroups.py
# Module author marker.
__author__ = 'varun'

# Role labels for the application's user groups.
# (Extraction artifact "| 1.070313 | 1" removed from the STUDENT line; it
# previously made the assignment a broken bitwise-or expression.)
TEACHER = 'Teacher'
STUDENT = 'Student'
example/producer/app/demo.py | NimzyMaina/flask-rabbitmq | 55 | 12766327 | <gh_stars>10-100
# encoding:utf-8
from app import mq, queue
import json
@queue(queue_name='rpc-queue')
def sum_callback(ch, method, props, body):
    """RPC consumer for 'rpc-queue': adds the request's 'a' and 'b' fields,
    acks the message, and publishes the result back on props.reply_to with
    the request's correlation id.

    Note: an extraction artifact ("| 2.25 | 2") fused onto the final
    mq.send_json call has been removed — it broke the statement.
    """
    print(props.correlation_id)
    print(props.reply_to)
    request = json.loads(body)
    result = request['a'] + request['b']
    print("Result -- " + str(result))
    reply = {
        'result': result
    }
    # Ack first, then reply; if the reply publish fails the request is
    # already acknowledged and will not be redelivered.
    ch.basic_ack(delivery_tag=method.delivery_tag)
    mq.send_json(reply, exchange='', key=props.reply_to, corr_id=props.correlation_id)
core/schema/category.py | 1uy31/learning-diary | 0 | 12766328 | import graphene
from graphene import relay
from graphene_sqlalchemy import SQLAlchemyObjectType
from core.models import Category as CategoryModel
from core.models import CategoryConnector
# Module-level connector shared by the mutations below (DB access helper).
category_connector = CategoryConnector()
class CategoryNode(SQLAlchemyObjectType):
    """Relay node exposing the Category SQLAlchemy model over GraphQL."""
    class Meta:
        model = CategoryModel
        interfaces = (relay.Node,)
class CreateCategory(graphene.Mutation):
    """GraphQL mutation: create a new Category and return it as a node."""
    class Arguments:
        # TODO: max-length constraint
        name = graphene.String(required=True)
    Output = CategoryNode
    def mutate(self, _, name):
        """Create a Category row and return the corresponding CategoryNode.

        :param _: graphene resolve info (forwarded to get_node).
        :param name: name for the new category.
        :return: CategoryNode wrapping the freshly created row.
        """
        category = category_connector.database_helper.create_object(
            category_connector.model, name=name
        )
        category_node = CategoryNode.get_node(_, category.id)
        return category_node
class UpdateCategory(graphene.Mutation):
    """GraphQL mutation: rename an existing Category by primary key."""
    class Arguments:
        # TODO: max-length constraint
        primary_key = graphene.Int(required=True)
        name = graphene.String(required=True)
    Output = CategoryNode
    def mutate(self, _, primary_key, name):
        """Update the Category identified by `primary_key` and return it.

        :param _: graphene resolve info (forwarded to get_node).
        :param primary_key: id of the category to update.
        :param name: new name for the category.
        :return: CategoryNode wrapping the updated row.
        """
        category = category_connector.database_helper.update_object(
            category_connector.model, primary_key, name=name
        )
        category_node = CategoryNode.get_node(_, category.id)
        return category_node
| 2.640625 | 3 |
django_postgres/view.py | zacharyvoase/django-postgres | 51 | 12766329 | <reponame>zacharyvoase/django-postgres
"""Helpers to access Postgres views from the Django ORM."""
import collections
import copy
import logging
import re
from django.db import connection, transaction
from django.db import models
import psycopg2
from . import six
FIELD_SPEC_REGEX = (r'^([A-Za-z_][A-Za-z0-9_]*)\.'
r'([A-Za-z_][A-Za-z0-9_]*)\.'
r'(\*|(?:[A-Za-z_][A-Za-z0-9_]*))$')
FIELD_SPEC_RE = re.compile(FIELD_SPEC_REGEX)
log = logging.getLogger('django_postgres.view')
def hasfield(model_cls, field_name):
    """Like `hasattr()`, but for model fields.
    >>> from django.contrib.auth.models import User
    >>> hasfield(User, 'password')
    True
    >>> hasfield(User, 'foobar')
    False
    """
    # NOTE(review): _meta.get_field_by_name was removed in Django 1.10; this
    # module appears to target older Django — confirm the supported version.
    try:
        model_cls._meta.get_field_by_name(field_name)
        return True
    except models.FieldDoesNotExist:
        return False
# Projections of models fields onto views which have been deferred due to
# model import and loading dependencies.
# Format: (app_label, model_name): {view_cls: [field_name, ...]}
# The nested defaultdicts let callers append without pre-creating keys.
_DEFERRED_PROJECTIONS = collections.defaultdict(
    lambda: collections.defaultdict(list))
def realize_deferred_projections(sender, *args, **kwargs):
    """Project any fields which were deferred pending model preparation.

    Connected to the `class_prepared` signal below: when `sender` (a model
    class) finishes loading, copy its queued fields onto each waiting view.
    """
    app_label = sender._meta.app_label
    model_name = sender.__name__.lower()
    # pop() so each (app, model) pair is only ever realized once.
    pending = _DEFERRED_PROJECTIONS.pop((app_label, model_name), {})
    for view_cls, field_names in six.iteritems(pending):
        field_instances = get_fields_by_name(sender, *field_names)
        for name, field in six.iteritems(field_instances):
            # Only assign the field if the view does not already have an
            # attribute or explicitly-defined field with that name.
            if hasattr(view_cls, name) or hasfield(view_cls, name):
                continue
            # Copy the field so the original model's field object is not
            # rebound to the view by contribute_to_class.
            copy.copy(field).contribute_to_class(view_cls, name)
models.signals.class_prepared.connect(realize_deferred_projections)
def create_views(models_module, update=True, force=False):
    """Create the database views for a given models module.

    Generator: yields (status, view_cls, dotted_python_name) for every
    `View` subclass in `models_module` that defines an `sql` attribute.
    On failure, the raised exception is annotated with `view_cls` and
    `python_name` so callers can report which view broke.
    """
    for name, view_cls in six.iteritems(vars(models_module)):
        # Skip anything that is not a concrete View subclass with SQL.
        if not (isinstance(view_cls, type) and
                issubclass(view_cls, View) and
                hasattr(view_cls, 'sql')):
            continue
        try:
            created = create_view(connection, view_cls._meta.db_table,
                                  view_cls.sql, update=update, force=force)
        except Exception as exc:
            exc.view_cls = view_cls
            exc.python_name = models_module.__name__ + '.' + name
            raise
        else:
            yield created, view_cls, models_module.__name__ + '.' + name
def create_view(connection, view_name, view_query, update=True, force=False):
    """
    Create a named view on a connection.
    Returns a status string: 'CREATED' (new view), 'UPDATED' (existing view
    replaced), 'EXISTS' (view present and ``update`` is False), 'FORCED'
    (incompatible view dropped and recreated), or 'FORCE_REQUIRED'
    (incompatible view left untouched because ``force`` is False).
    If ``update`` is True (default), attempt to update an existing view. If the
    existing view's schema is incompatible with the new definition, ``force``
    (default: False) controls whether or not to drop the old view and create
    the new one.
    """
    # Unwrap Django's cursor to reach the raw psycopg2 cursor. View/table
    # names come from model metadata (trusted), hence the string formatting.
    cursor_wrapper = connection.cursor()
    cursor = cursor_wrapper.cursor.cursor
    try:
        force_required = False
        # Determine if view already exists.
        cursor.execute('SELECT COUNT(*) FROM pg_catalog.pg_class WHERE relname = %s;',
                       [view_name])
        view_exists = cursor.fetchone()[0] > 0
        if view_exists and not update:
            return 'EXISTS'
        elif view_exists:
            # Detect schema conflict by copying the original view, attempting to
            # update this copy, and detecting errors.
            cursor.execute('CREATE TEMPORARY VIEW check_conflict AS SELECT * FROM {0};'.format(view_name))
            try:
                cursor.execute('CREATE OR REPLACE TEMPORARY VIEW check_conflict AS {0};'.format(view_query))
            except psycopg2.ProgrammingError:
                # Incompatible column set: Postgres refuses CREATE OR REPLACE.
                force_required = True
                cursor.connection.rollback()
            finally:
                cursor.execute('DROP VIEW IF EXISTS check_conflict;')
        if not force_required:
            cursor.execute('CREATE OR REPLACE VIEW {0} AS {1};'.format(view_name, view_query))
            ret = view_exists and 'UPDATED' or 'CREATED'
        elif force:
            cursor.execute('DROP VIEW {0};'.format(view_name))
            cursor.execute('CREATE VIEW {0} AS {1};'.format(view_name, view_query))
            ret = 'FORCED'
        else:
            ret = 'FORCE_REQUIRED'
        # NOTE(review): commit_unless_managed was removed in Django 1.8;
        # this module targets an older Django — confirm supported version.
        transaction.commit_unless_managed()
        return ret
    finally:
        cursor_wrapper.close()
def get_fields_by_name(model_cls, *field_names):
    """Map field names to their `models.Field` instances on `model_cls`.

    A `'*'` anywhere in `field_names` acts as a wildcard and selects every
    field declared on the model, e.g.::

        get_fields_by_name(User, '*')
        get_fields_by_name(User, 'username', 'password')
    """
    if '*' in field_names:
        return {field.name: field for field in model_cls._meta.fields}
    return {name: model_cls._meta.get_field_by_name(name)[0]
            for name in field_names}
class View(models.Model):
    """Helper for exposing Postgres views as Django models."""
    class ViewMeta(models.base.ModelBase):
        # Metaclass: pulls the optional `projection` attribute off the class
        # body and either copies concrete Field instances directly or queues
        # "app.Model.field" specs until the referenced model is prepared.
        def __new__(metacls, name, bases, attrs):
            projection = attrs.pop('projection', [])
            deferred_projections = []
            for field_name in projection:
                if isinstance(field_name, models.Field):
                    # Concrete field given: copy it so the original is not
                    # rebound to this view class.
                    attrs[field_name.name] = copy.copy(field_name)
                elif isinstance(field_name, basestring):
                    # String spec "app.Model.field" (or "app.Model.*");
                    # resolution is deferred until the model is loaded.
                    match = FIELD_SPEC_RE.match(field_name)
                    if not match:
                        raise TypeError("Unrecognized field specifier: %r" %
                                        field_name)
                    deferred_projections.append(match.groups())
                else:
                    raise TypeError("Unrecognized field specifier: %r" %
                                    field_name)
            view_cls = models.base.ModelBase.__new__(metacls, name, bases,
                                                     attrs)
            for app_label, model_name, field_name in deferred_projections:
                model_spec = (app_label, model_name.lower())
                _DEFERRED_PROJECTIONS[model_spec][view_cls].append(field_name)
                # If the model has already been loaded, run
                # `realize_deferred_projections()` on it.
                model_cls = models.get_model(app_label, model_name,
                                             seed_cache=False)
                if model_cls is not None:
                    realize_deferred_projections(model_cls)
            return view_cls
    # Python 2-style metaclass hook (`basestring` above confirms py2);
    # under Python 3 this assignment would be silently ignored.
    __metaclass__ = ViewMeta
    class Meta:
        abstract = True
        managed = False
test/docker/startup_scripts/392_dcim_devices_primary_ips.py | hosting-de-labs/go-netbox-client | 0 | 12766330 | from dcim.models import Site, Rack, DeviceRole, DeviceType, Device, Platform
from ipam.models import IPAddress
from startup_script_utils import load_yaml
import sys
# Load the device initializer file; nothing to do when it is absent/empty.
devices = load_yaml('/opt/netbox/initializers/dcim_devices.yml')
if devices is None:
    sys.exit()

# This pass only (re)applies the primary-IP pointers; the devices themselves
# were created by an earlier initializer script.
handled_attrs = [
    'primary_ip4_id',
    'primary_ip6_id'
]

for params in devices:
    # Collect only the handled attributes that are present for this device.
    new_params = {attr: params[attr] for attr in handled_attrs if attr in params}
    if not new_params:
        continue
    Device.objects.filter(name=params['name']).update(**new_params)
    print("🖥️ Updated device", params['name'])
| 2.3125 | 2 |
readthedocs/rtd_tests/tests/test_version_config.py | mforbes/readthedocs.org | 4,054 | 12766331 | <gh_stars>1000+
from django.test import TestCase
from django_dynamic_fixture import get
from readthedocs.builds.models import Build, Version
from readthedocs.projects.models import Project
class VersionConfigTests(TestCase):
    """Tests that Version.config resolves to the latest successful,
    finished build's _config, ignoring failed and in-progress builds."""
    def setUp(self):
        # One project with one version; builds are attached per test.
        self.project = get(Project)
        self.version = get(Version, project=self.project)
    def test_get_correct_config(self):
        """version.config comes from the newest successful finished build
        (version 2), not the failed (3) or still-building (4) ones."""
        build_old = Build.objects.create(
            project=self.project,
            version=self.version,
            _config={'version': 1},
        )
        build_new = Build.objects.create(
            project=self.project,
            version=self.version,
            _config={'version': 2},
        )
        build_new_error = Build.objects.create(
            project=self.project,
            version=self.version,
            _config={'version': 3},
            success=False,
        )
        build_new_unfinish = Build.objects.create(
            project=self.project,
            version=self.version,
            _config={'version': 4},
            state='building',
        )
        self.assertEqual(self.version.config, {'version': 2})
    def test_get_correct_config_when_same_config(self):
        """When consecutive builds share a config, the deduplicated value is
        still resolved correctly (config is set via the property setter)."""
        build_old = get(
            Build,
            project=self.project,
            version=self.version,
            _config={},
        )
        build_old.config = {'version': 1}
        build_old.save()
        build_new = get(
            Build,
            project=self.project,
            version=self.version,
            _config={},
        )
        build_new.config = {'version': 1}
        build_new.save()
        build_new_error = get(
            Build,
            project=self.project,
            version=self.version,
            _config={},
            success=False,
        )
        build_new_error.config = {'version': 3}
        build_new_error.save()
        build_new_unfinish = get(
            Build,
            project=self.project,
            version=self.version,
            _config={},
            state='building',
        )
        build_new_unfinish.config = {'version': 1}
        build_new_unfinish.save()
        config = self.version.config
        self.assertEqual(config, {'version': 1})
| 2.40625 | 2 |
utils/sqlitedb.py | hkailee/Quant-Finance | 1 | 12766332 | import sqlite3, ast
##############################################
### Login to database
##############################################
def login(dbfile):
    """Open (creating if necessary) the SQLite database file and return a
    (connection, cursor) pair."""
    connection = sqlite3.connect(dbfile)
    return connection, connection.cursor()
##############################################
### Create new database
##############################################
def makedb(dbfile, table, columnFeatures):
    """Drop `table` if it exists, then recreate it in `dbfile`.

    `columnFeatures` is a parenthesized SQL column spec, e.g.
    "(Column1 char(30), Column2 char(10), Column3 int(4))".

    Note: `table`/`columnFeatures` are interpolated into SQL — callers must
    not pass untrusted input (identifiers cannot be parameterized).
    """
    conn, curs = login(dbfile)
    try:
        curs.execute('drop table ' + table)
        print('Dropped table ' + table)
    except sqlite3.Error:
        # Narrowed from a bare `except:` so unrelated failures (e.g.
        # KeyboardInterrupt) are no longer silently swallowed.
        print('database table did not exist')
    command = 'create table %s %s' % (table, columnFeatures)
    curs.execute(command)
    conn.commit()
##############################################
### Load Data
##############################################
def loaddb(table, dbfile, datafile, conn=None, verbose=True):
    """Load tab-separated rows from `datafile` into `table` in `dbfile`.

    The first line of `datafile` is treated as a header and skipped.
    All values are inserted as text, matching the previous behavior.

    Note: the `conn` parameter is retained for signature compatibility but
    is immediately overwritten by a fresh connection from login() — this
    mirrors the original (the passed-in value was never used).
    """
    conn, curs = login(dbfile)
    # `with` guarantees the file is closed (the original leaked the handle).
    with open(datafile) as datastream:
        records = [line.rstrip().split('\t') for line in datastream]
    records = records[1:]  # skip header row
    for rec in records:
        # Parameterized insert instead of splicing str(tuple(rec)) into the
        # SQL: handles quotes in values and single-column rows correctly.
        placeholders = ', '.join(['?'] * len(rec))
        curs.execute('insert into %s values (%s)' % (table, placeholders), rec)
    if conn:
        conn.commit()
    if verbose:
        print(len(records), 'rows loaded')
##############################################
### Remove a table from Database
##############################################
def cleardb(dbfile, table):
    """Drop `table` from `dbfile`, committing on success.

    (An extraction artifact "| 3.640625 | 4" fused onto the final print
    statement has been removed — it broke the call expression.)
    """
    conn, curs = login(dbfile)
    try:
        curs.execute('drop table ' + table)
        conn.commit()
        print('Dropped table ', table)
    except sqlite3.Error:
        # Narrowed from a bare `except:`.
        # NOTE(review): the message mentions "creating this table" but this
        # function never creates anything — wording kept for compatibility.
        print(table, 'table did not exist, creating this table')
api_stack/function/query_scan_status/app.py | richardfan1126/ssm-patch-portal | 4 | 12766333 | import os
import json
import boto3
ssm = boto3.client('ssm')
def query_association():
    """Return the SSM association named 'ssm-patch-portal-scan', or None
    when no such association exists."""
    response = ssm.list_associations(
        AssociationFilterList=[
            {
                "key": "AssociationName",
                "value": "ssm-patch-portal-scan"
            }
        ],
    )
    associations = response['Associations']
    return associations[0] if associations else None
def handler(event, context):
    """Lambda entry point: report the patch-scan association's status.

    Returns an API Gateway-style response with CORS headers; body is the
    JSON-encoded association summary (or null), or {"message": ...} with
    status 400 on failure.

    Fixes: the original returned from a `finally` block, which silently
    swallowed any exception in flight (including BaseException such as
    KeyboardInterrupt); an extraction artifact fused onto the return line
    was also removed.
    """
    api_response = {
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "Origin, X-Requested-With, Content-Type, Accept"
        }
    }
    try:
        association = query_association()
        if association is None:
            response = None
        else:
            # Fields may be absent on a fresh association, hence the guards.
            response = {
                "associationId": association["AssociationId"] if "AssociationId" in association else None,
                "lastExecutionDate": association["LastExecutionDate"].strftime("%Y-%m-%dT%H:%M:%SZ") if "LastExecutionDate" in association else None,
                "overview": association["Overview"] if "Overview" in association else None,
            }
        api_response["statusCode"] = 200
        api_response["body"] = json.dumps(response)
    except Exception as e:
        # Broad catch is acceptable at the Lambda boundary: surface the
        # error to the client rather than crashing the invocation.
        api_response["statusCode"] = 400
        api_response["body"] = json.dumps({
            "message": str(e)
        })
    return api_response
tests/unit/core/domain/test_message_bus.py | meaningfy-ws/ted-xml-2-rdf | 0 | 12766334 | <reponame>meaningfy-ws/ted-xml-2-rdf
#!/usr/bin/python3
"""
"""
import logging
from ted_sws.event_manager.adapters.logger import Logger
from ted_sws.event_manager.domain.message_bus import message_bus
from ted_sws.event_manager.model.message import Log
# Dedicated logger for this test; propagate=True so records bubble up to the
# root logger, where pytest's `caplog` fixture can capture them.
TEST_LOGGER = Logger(name="TEST_MESSAGE_BUS_LOGGER", level=logging.INFO)
TEST_LOGGER.get_logger().propagate = True
def test_message_bus_log(caplog):
    """Handling Log messages (titled, single-string, and list-of-strings)
    through the message bus must emit every piece of text to the logger."""
    log1 = Log(
        title="test_message_bus_log",
        message=["log_message1 :: 1", "log_message :: 2"],
        logger=TEST_LOGGER
    )
    message_bus.set_domain_logger(TEST_LOGGER)
    message_bus.handle(log1)
    log2 = Log(
        message="log_message2 :: MESSAGE",
        logger=TEST_LOGGER
    )
    message_bus.handle(log2)
    log3 = Log(
        message=["log_message3 :: 1", "log_message3 :: 2"],
        logger=TEST_LOGGER
    )
    message_bus.handle(log3)
    # Every title and message fragment must appear in the captured output.
    if log1.title:
        assert log1.title in caplog.text
    if log1.message:
        for message in log1.message:
            assert message in caplog.text
    if log2.message:
        assert log2.message in caplog.text
    if log3.message:
        for message in log3.message:
            assert message in caplog.text
| 2.375 | 2 |
example/data/migrations/0001_initial.py | lee0210/sproxy | 1 | 12766335 | # Generated by Django 2.0.7 on 2018-07-09 08:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.7 (see file header); creates the
    # Heartbeat table. Avoid hand-editing generated migrations.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Heartbeat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('app_name', models.CharField(max_length=50)),
                ('last_beat', models.DateTimeField()),
            ],
        ),
    ]
| 1.789063 | 2 |
snap/models.py | Imma7/Snap | 0 | 12766336 | from django.db import models
import datetime as dt
# Create your models here.
class Category(models.Model):
    """A category label that gallery images can be filed under."""
    category_name = models.CharField(max_length = 50)
    # image = models.ForeignKey(Image)
    def __str__(self):
        return self.category_name
class Location(models.Model):
    """A place an image was taken; referenced by Image.location."""
    location_name = models.CharField(max_length = 50)
    # image = models.ForeignKey(Image)
    def __str__(self):
        return self.location_name
    def save_location(self):
        """Persist this location (thin wrapper around Model.save)."""
        self.save()
class Image(models.Model):
    """A gallery photo with a name, description, location and category.

    Note: ForeignKey without on_delete implies Django < 2.0.
    """
    image = models.ImageField(upload_to = 'gallery/', blank = True)
    img_name = models.CharField(max_length = 30)
    img_description = models.TextField(max_length=50, blank=True)
    pub_date = models.DateTimeField(auto_now_add=True)
    location = models.ForeignKey(Location)
    category = models.ForeignKey(Category)
    def __str__(self):
        return self.img_name
    def save_image(self):
        """Persist this image (thin wrapper around Model.save)."""
        self.save()
    def delete_image(self):
        """Remove this image row (thin wrapper around Model.delete)."""
        self.delete()
    @classmethod
    def get_image_by_id(cls, id):
        """Return the single image with the given primary key."""
        specific_image = cls.objects.get(id = id)
        return specific_image
    @classmethod
    def display_image(cls):
        # NOTE(review): computes today's date and returns nothing — this
        # method looks unfinished; confirm intent before relying on it.
        today = dt.date.today()
    @classmethod
    def get_all(cls):
        """Return all images, newest first."""
        images = cls.objects.order_by('-pub_date')
        return images
    @classmethod
    def filter_location(cls, location):
        """Images whose location name starts with `location` (case-insensitive)."""
        images = cls.objects.filter(location__location_name__istartswith=location)
        return images
    @classmethod
    def filter_category(cls, category):
        """Images whose category name starts with `category` (case-insensitive)."""
        images = cls.objects.filter(category__category_name__istartswith=category)
        return images
    @classmethod
    def search_image(cls, search_term):
        """Images whose name contains `search_term` (case-insensitive)."""
        images = cls.objects.filter(img_name__icontains=search_term)
        return images
| 2.34375 | 2 |
run.py | gaolycn/ssr-panel-sanic | 9 | 12766337 | <reponame>gaolycn/ssr-panel-sanic
from ssr_panel import app
# Start the Sanic server with host/port/worker settings from app.config.
if __name__ == '__main__':
    app.run(host=app.config.HOST, port=app.config.PORT, workers=app.config.WORKERS, debug=app.config.DEBUG)
| 1.273438 | 1 |
blog/urls.py | feynubrick/blog-django | 0 | 12766338 | <filename>blog/urls.py
from django.urls import path
from blog.views.home import get_home_page
from blog.views.about import get_about_page
from blog.views.posts import get_post_list_page, get_post_detail_page
# Blog URL routes; the `name` values are used for reverse() / {% url %}.
# (Extraction artifact "| 2.078125 | 2" removed from the closing bracket.)
urlpatterns = [
    path('', get_home_page, name='blog_home'),
    path('about', get_about_page, name='blog_about'),
    path('posts', get_post_list_page, name='blog_posts'),
    path('posts/<int:post_id>', get_post_detail_page, name='blog_post'),
]
add_image.py | ed-ortizm/slides-talks | 0 | 12766339 | <reponame>ed-ortizm/slides-talks
from pptx import Presentation
from pptx.util import Inches
# Demo: place the same picture twice on a blank slide — once at native size,
# once scaled — then save the deck as test.pptx.
img_path = 'data/akitas.png'
prs = Presentation()
blank_slide_layout = prs.slide_layouts[6]  # layout index 6: blank layout
slide = prs.slides.add_slide(blank_slide_layout)
left = top = Inches(1)
pic = slide.shapes.add_picture(img_path, left, top)  # native size at (1", 1")
left = Inches(5)
height = Inches(5.5)
pic = slide.shapes.add_picture(img_path, left, top, height=height)  # scaled to 5.5" tall
prs.save('test.pptx')
| 2.625 | 3 |
simdir/setup_dd.py | fuco99/bjt_devsim | 5 | 12766340 | # Copyright 2016 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from devsim import *
from physics.new_physics import *
def run(device, region):
    """Set up the drift-diffusion equation system on `device`/`region`:
    solution variables, mobility models, bulk equations, and zero-bias
    contact boundary conditions. Call order matters: fields and mobility
    options must exist before CreateSiliconDriftDiffusion consumes them."""
    # this is our solution variable
    CreateSolution(device, region, "Potential")
    CreateSolution(device, region, "Electrons")
    CreateSolution(device, region, "Holes")
    #these are needed for velocity saturation
    CreateEField(device, region)
    CreateDField(device, region)
    # Low-field Arora mobility first; the high-field model extends its opts.
    opts = CreateAroraMobilityLF(device, region)
    opts = CreateHFMobility(device, region, **opts)
    CreateSiliconDriftDiffusion(device, region, **opts)
    # Initialize every contact at 0 V and attach its boundary equations.
    for i in get_contact_list(device=device):
        set_parameter(device=device, name=GetContactBiasName(i), value=0.0)
        CreateSiliconDriftDiffusionContact(device, region, i, opts['Jn'], opts['Jp'])
| 2.296875 | 2 |
data.py | chrisliu298/pytorch_projects | 0 | 12766341 | <reponame>chrisliu298/pytorch_projects<gh_stars>0
import pandas as pd
import pytorch_lightning as pl
import torch
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from torch.utils.data import DataLoader, TensorDataset
class DataModule(pl.LightningDataModule):
    """Lightning data module for a TSV text-classification dataset.

    Expects `train_file`/`test_file` to be tab-separated with 'text' and
    'label' columns. A Keras tokenizer is fitted on the training text at
    construction time; texts are tokenized and padded/truncated to
    `sequence_length` before being wrapped in TensorDatasets.
    """
    def __init__(
        self,
        train_file,
        test_file,
        batch_size=64,
        num_embeddings=10000,
        sequence_length=512,
        num_dataloader_workers=2,
    ):
        super().__init__()
        self.train_file = train_file
        self.test_file = test_file
        self.batch_size = batch_size
        self.num_embeddings = num_embeddings
        self.sequence_length = sequence_length
        self.num_dataloader_workers = num_dataloader_workers
        # Vocabulary is capped at num_embeddings; out-of-vocabulary tokens
        # map to "<OOV>". Note: this reads train_file once here and again
        # in prepare_data().
        self.tokenizer = Tokenizer(num_words=num_embeddings, oov_token="<OOV>")
        self.tokenizer.fit_on_texts(
            pd.read_csv(train_file, delimiter="\t")["text"]
        )
    def prepare_data(self):
        # Download imdb dataset
        # NOTE(review): this assigns instance state (self.train/val/test);
        # in multi-process Lightning setups that work belongs in setup() —
        # confirm single-process usage. sample(frac=1) shuffles the rows;
        # the train/val split is a hard-coded 20000-row cut.
        train_data = pd.read_csv(self.train_file, delimiter="\t").sample(frac=1)
        test_data = pd.read_csv(self.test_file, delimiter="\t").sample(frac=1)
        train_text = train_data["text"].iloc[:20000]
        train_label = train_data["label"].iloc[:20000].to_numpy()
        val_text = train_data["text"].iloc[20000:]
        val_label = train_data["label"].iloc[20000:].to_numpy()
        test_text = test_data["text"]
        test_label = test_data["label"].to_numpy()
        train_text = self.tokenize(train_text)
        val_text = self.tokenize(val_text)
        test_text = self.tokenize(test_text)
        # Convert numpy arrays to tensors
        train_text, train_label = map(
            torch.from_numpy, [train_text, train_label]
        )
        val_text, val_label = map(torch.from_numpy, [val_text, val_label])
        test_text, test_label = map(torch.from_numpy, [test_text, test_label])
        # Make tensor datasets
        self.train = TensorDataset(train_text, train_label)
        self.val = TensorDataset(val_text, val_label)
        self.test = TensorDataset(test_text, test_label)
    def tokenize(self, sequences):
        """Convert texts to integer sequences padded/truncated (at the end)
        to `sequence_length`."""
        return pad_sequences(
            self.tokenizer.texts_to_sequences(sequences),
            maxlen=self.sequence_length,
            truncating="post",
            padding="post",
        )
    def train_dataloader(self):
        # Shuffling only on the training split.
        return DataLoader(
            self.train,
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.num_dataloader_workers,
            pin_memory=True,
        )
    def val_dataloader(self):
        return DataLoader(
            self.val,
            batch_size=self.batch_size,
            num_workers=self.num_dataloader_workers,
            pin_memory=True,
        )
    def test_dataloader(self):
        return DataLoader(
            self.test,
            batch_size=self.batch_size,
            num_workers=self.num_dataloader_workers,
            pin_memory=True,
        )
| 2.8125 | 3 |
test/api_loadtest.py | shashi/JuliaBox | 50 | 12766342 | <reponame>shashi/JuliaBox
import random
import requests
from locust import HttpLocust, TaskSet
# disable warnings from printing
requests.packages.urllib3.disable_warnings()
# register APIs fib1 to fib10 with the following code:
# fib(n::AbstractString) = fib(parse(Int, n)); fib(n::Int) = (n < 2) ? n : (fib(n-1) + fib(n-2)); process([(fib, false)]);
def pyfib(n):
    """Reference Fibonacci used to validate the remote fib API.

    Iterative O(n) implementation; the original naive double recursion was
    O(phi^n), which is wasteful inside a load-test loop. Returns 0 for n=0,
    1 for n in (1, 2), and fib(n-1)+fib(n-2) otherwise.
    """
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def check_response(response, n):
    """Mark the locust response success/failure by comparing its body
    (as an integer) against the local reference fib(n)."""
    actual = int(response.content.strip())
    expected = pyfib(n)
    if actual == expected:
        response.success()
    else:
        response.failure("Expected fib(%d)=%d. Got %d" % (n, expected, actual))
def genfib(apiname):
    """Build a locust task hitting GET /<apiname>/fib/<n> for a random
    n in [0, 10], validating the body via check_response()."""
    def _fib(l):
        n = random.randint(0, 10)
        # catch_response=True lets check_response decide success/failure;
        # verify=False skips TLS certificate verification.
        response = l.client.get("/%s/fib/%d" % (apiname, n), catch_response=True, verify=False)
        check_response(response, n)
    # Distinct task names so locust statistics are reported per endpoint.
    _fib.__name__ = apiname
    return _fib
# One generated task per registered API endpoint fib1..fib10.
fibs = [genfib("fib%d" % (idx,)) for idx in range(1, 11)]
class UserBehavior(TaskSet):
    # Task weights 0..9: heavier load on higher-numbered endpoints.
    # NOTE(review): fibs[0] gets weight 0 and so is never scheduled by
    # locust — confirm whether weights were meant to be 1..10 instead.
    tasks = {fibs[i]: i for i in range(0, 10)}
class WebsiteUser(HttpLocust):
    task_set = UserBehavior
    # Per-user wait between tasks, in milliseconds.
    min_wait = 1000
    max_wait = 10000
| 2.515625 | 3 |
certleak/core/certstreamdata/certstreamobject.py | d-Rickyy-b/certleak | 0 | 12766343 | # -*- coding: utf-8 -*-
class CertstreamObject(object):
    """Base class for all the certstream data classes."""

    @classmethod
    def from_dict(cls, data):
        """Return a shallow copy of `data`, or None when `data` is falsy.

        :param data: The dict from which an object should be created from
        :return: copy of data or None
        """
        return data.copy() if data else None
| 2.828125 | 3 |
quarrel/http.py | mrvillage/quarrel | 2 | 12766344 | """
The MIT License (MIT)
Copyright (c) 2021-present Village
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import datetime
import sys
from typing import TYPE_CHECKING
import aiohttp
from . import __version__
from .errors import (
BadRequest,
Forbidden,
HTTPException,
MethodNotAllowed,
NotFound,
ServerError,
Unauthorized,
)
from .missing import MISSING
__all__ = ("HTTP",)
if TYPE_CHECKING:
from typing import (
Any,
ClassVar,
Coroutine,
Dict,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
from .file import File
from .missing import Missing
from .types.interactions import (
ApplicationCommand,
InteractionResponse,
PartialApplicationCommand,
)
from .types.message import Message
T = TypeVar("T")
Response = Coroutine[Any, Any, T]
class Bucket:
    """A rate-limit bucket guarding requests for one Discord API route.

    Wraps an :class:`asyncio.Lock` so requests sharing a bucket run one at
    a time, and records the ``X-RateLimit-*`` response headers so the lock
    release can be delayed until the limit resets.  Instances are cached on
    ``http.buckets`` under a ``"<bucket>___<major parameters>"`` key.
    """

    __slots__ = (
        "http",
        "route_key",
        "key",
        "release_immediately",
        "lock",
        "limit",
        "remaining",
        "reset",
        "reset_after",
        "bucket",
        "global_",
    )

    def __init__(self, http: HTTP, route_key: str, key: str, global_: bool, /) -> None:
        self.http: HTTP = http
        # "METHOD + path template" string identifying the route.
        self.route_key: str = route_key
        # Cache key in http.buckets; see bucket_key() for the format.
        self.key: str = key
        # When False, release() has been deferred until the bucket resets.
        self.release_immediately: bool = True
        self.lock: asyncio.Lock = asyncio.Lock()
        # The fields below mirror the X-RateLimit-* headers; they stay None
        # until the first response for this bucket has been seen.
        self.limit: Optional[int] = None
        self.remaining: Optional[int] = None
        self.reset: Optional[float] = None
        self.reset_after: Optional[float] = None
        self.bucket: Optional[str] = None
        # Whether this route counts against the global rate limit.
        self.global_: bool = global_

    async def __aenter__(self) -> Bucket:
        await self.lock.acquire()
        # NOTE(review): asyncio.Event.wait() returns immediately when the
        # event is *set*, so this wait is effectively a no-op; the actual
        # pause for a global 429 happens in handle_ratelimit()'s sleep.
        # Left unchanged to preserve existing behavior.
        if self.http.global_ratelimit.is_set() and self.global_:
            await self.http.global_ratelimit.wait()
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[Any],
    ) -> None:
        if self.release_immediately:
            self.release()

    def delay_amount(self) -> float:
        """Return how many seconds remain until this bucket resets.

        Prefers the relative ``Reset-After`` value (immune to clock skew);
        falls back to the absolute ``Reset`` timestamp; returns 0 when
        neither header has been seen yet.
        """
        # Checking `is not None` (rather than truthiness) means a 0.0
        # reset_after correctly yields no delay instead of crashing on
        # float(None) when `reset` is also missing.
        if self.reset_after is not None:
            return self.reset_after
        if self.reset is None:
            return 0
        utc = datetime.timezone.utc
        now = datetime.datetime.now(utc)
        return (
            datetime.datetime.fromtimestamp(float(self.reset), utc) - now
        ).total_seconds()

    def delay_release(self) -> None:
        """Keep the lock held until the rate limit resets, then release."""
        self.release_immediately = False
        self.http.loop.call_later(self.delay_amount(), self.release)

    def release(self) -> None:
        """Release the lock and schedule cache expiry once nobody waits."""
        self.lock.release()
        if not self.lock._waiters and not self.lock.locked():  # type: ignore
            if self.delay_amount() > 0:
                # Keep the recorded limits around until the bucket resets.
                self.http.loop.call_later(self.delay_amount(), self.expire)
            else:
                self.expire()

    def expire(self) -> None:
        """Drop this bucket from the HTTP cache if it is idle."""
        if not self.lock._waiters and not self.lock.locked():  # type: ignore
            # pop() with a default: the key may already have been removed
            # or replaced by handle_ratelimit()'s re-keying.
            self.http.buckets.pop(self.key, None)

    @staticmethod
    def bucket_key(
        bucket: Optional[str],
        channel_id: Optional[int] = None,
        guild_id: Optional[int] = None,
        webhook_id: Optional[int] = None,
        webhook_token: Optional[str] = None,
    ) -> str:
        """Build the cache key: ``"<bucket>___<major parameters>"``."""
        return f"{bucket}___{channel_id}/{guild_id}/{webhook_id}/{webhook_token}"

    @classmethod
    def from_major_parameters(
        cls,
        http: HTTP,
        route_key: str,
        global_: bool = True,
        /,
        channel_id: Optional[int] = None,
        guild_id: Optional[int] = None,
        webhook_id: Optional[int] = None,
        webhook_token: Optional[str] = None,
    ) -> Bucket:
        """Fetch (or create and cache) the bucket for a route.

        Uses the server-provided bucket hash for the route when one has
        already been learned, otherwise the route key itself.
        """
        key = cls.bucket_key(
            http.route_buckets.get(route_key, route_key),
            channel_id=channel_id,
            guild_id=guild_id,
            webhook_id=webhook_id,
            webhook_token=webhook_token,
        )
        bucket_ = http.buckets.get(key)
        if bucket_ is None:
            bucket_ = cls(http, route_key, key, global_)
            http.buckets[key] = bucket_
        return bucket_

    async def handle_ratelimit(
        self, response: aiohttp.ClientResponse, data: Union[str, Dict[str, Any]], /
    ) -> None:
        """Record the response's rate-limit headers and honour any 429.

        Updates the cached limit/remaining/reset values, re-keys the bucket
        once the server reveals its bucket hash, delays the lock release
        when the bucket is exhausted, and sleeps out 429 responses
        (toggling the global-rate-limit event for global 429s).
        """
        remaining = response.headers.get("X-RateLimit-Remaining")
        if remaining is not None:
            self.remaining = int(remaining)
        limit = response.headers.get("X-RateLimit-Limit")
        if limit is not None:
            self.limit = int(limit)
        reset = response.headers.get("X-RateLimit-Reset")
        if reset is not None:
            self.reset = float(reset)
        reset_after = response.headers.get("X-RateLimit-Reset-After")
        if reset_after is not None:
            self.reset_after = float(reset_after)
        bucket = response.headers.get("X-RateLimit-Bucket")
        if bucket is not None and self.bucket is None:
            self.bucket = bucket
            self.http.route_buckets[self.route_key] = bucket
            # Re-key under the server bucket hash, keeping the
            # "<bucket>___<majors>" format so later from_major_parameters()
            # lookups find this bucket again.  (The previous code dropped
            # the "___" separator, so re-keyed buckets were never found and
            # a fresh bucket was created for every subsequent request.)
            self.http.buckets.pop(self.key, None)
            self.key = bucket + "___" + self.key.split("___", 1)[1]
            self.http.buckets[self.key] = self
        if self.remaining == 0:
            # Bucket exhausted: hold the lock until the limit resets.
            self.delay_release()
        if response.status == 429:
            if isinstance(data, str):
                # Non-JSON 429 (e.g. from a proxy): nothing to parse.
                raise HTTPException(response, data)
            global_ = response.headers.get(
                "X-RateLimit-Global"
            ) is not None and data.get("global")
            if global_:
                self.http.global_ratelimit.set()
            await asyncio.sleep(data["retry_after"])
            if global_:
                self.http.global_ratelimit.clear()
class HTTP:
    """Low-level Discord REST client.

    Owns the aiohttp session, the auth headers, and the per-route
    rate-limit bucket cache; every API call funnels through
    :meth:`request`, which handles rate limiting, retries, and mapping of
    error statuses to exceptions.
    """

    BASE_URL: ClassVar[str] = "https://discord.com/api/v10"
    USER_AGENT: ClassVar[str] = f"DiscordBot (https://github.com/mrvillage/quarrel {__version__}) Python/{sys.version_info[0]}.{sys.version_info[1]} aiohttp/{aiohttp.__version__}"  # type: ignore

    def __init__(
        self,
        session: aiohttp.ClientSession,
        token: str,
        application_id: int,
        loop: asyncio.AbstractEventLoop,
        /,
    ) -> None:
        self.session: aiohttp.ClientSession = session
        self.token: str = token
        self.application_id: int = application_id
        self.loop: asyncio.AbstractEventLoop = loop
        # Rate-limit buckets keyed by "<bucket>___<major parameters>".
        self.buckets: Dict[str, Bucket] = {}
        # Set while a global 429 is being waited out (see Bucket).
        self.global_ratelimit: asyncio.Event = asyncio.Event()
        self.headers = {
            "User-Agent": self.USER_AGENT,
            "Authorization": f"Bot {self.token}",
        }
        # Maps "METHOD + path template" route keys to server bucket hashes.
        self.route_buckets: Dict[str, str] = {}

    async def request(
        self,
        method: str,
        path: str,
        route_parameters: Missing[Mapping[str, Any]] = MISSING,
        files: Missing[Sequence[File]] = MISSING,
        *,
        global_: bool = True,
        **kwargs: Any,
    ) -> Any:
        """Perform one API request.

        Acquires the route's rate-limit bucket, retries transient 5xx
        responses up to three times, sleeps out 429s (inside the bucket),
        and raises the mapped :class:`HTTPException` subclass for error
        statuses.

        :param method: HTTP verb, e.g. ``"GET"``.
        :param path: route template containing ``{placeholders}``.
        :param route_parameters: values for the placeholders; also supply
            the bucket's major parameters.
        :param files: attachments; switches the body to multipart.
        :param global_: whether the route counts against the global limit.
        :return: the decoded JSON body (or raw text for non-JSON replies).
        """
        route_parameters = route_parameters or {}
        if files is not MISSING:
            import json  # only needed on the multipart upload path

            form_data = aiohttp.FormData()
            # aiohttp cannot encode a dict passed as a form-field value;
            # the JSON payload must be serialized explicitly.
            form_data.add_field(
                name="payload_json",
                value=json.dumps(kwargs.pop("json")),
                content_type="application/json",
            )
            for index, file in enumerate(files):
                form_data.add_field(
                    name=f"file{index}",
                    value=file.buffer,
                    filename=file.name,
                    content_type="application/octet-stream",
                )
            # aiohttp takes request bodies via the ``data`` keyword; the
            # previous ``form`` keyword is not an aiohttp parameter, so
            # every file upload failed with a TypeError.
            kwargs["data"] = form_data
        url = f"{self.BASE_URL}{path.format_map(route_parameters)}"
        async with Bucket.from_major_parameters(
            self,
            method + path,
            global_,
            channel_id=route_parameters.get("channel_id"),
            guild_id=route_parameters.get("guild_id"),
            webhook_id=route_parameters.get("webhook_id"),
            webhook_token=route_parameters.get("webhook_token"),
        ) as bucket:
            response = None
            data = None
            for try_ in range(3):
                async with self.session.request(
                    method, url, headers=self.headers, **kwargs
                ) as response:
                    # .get() rather than []: a response may omit the header.
                    if response.headers.get("content-type") == "application/json":
                        data = await response.json()
                    else:
                        data = await response.text()
                    await bucket.handle_ratelimit(response, data)
                    if 300 > response.status >= 200:
                        return data
                    if response.status == 400:
                        raise BadRequest(response, data)
                    if response.status == 401:
                        raise Unauthorized(response, data)
                    if response.status == 403:
                        raise Forbidden(response, data)
                    if response.status == 404:
                        raise NotFound(response, data)
                    if response.status == 405:
                        raise MethodNotAllowed(response, data)
                    if response.status in {500, 502, 504}:
                        # Transient server error: back off and retry.
                        await asyncio.sleep(1 + try_)
                        continue
                    if response.status >= 500:
                        raise ServerError(response, data)
                    if response.status != 429:
                        raise HTTPException(response, data)
            # Retries exhausted without a success or a mapped error.
            if response is not None:
                if response.status >= 500:
                    raise ServerError(response, data)
                raise HTTPException(response, data)

    async def get_gateway_bot(
        self, encoding: str = "json", compress: bool = True, v: int = 10
    ) -> str:
        """Return the gateway WebSocket URL with connection parameters."""
        data = await self.request("GET", "/gateway/bot")
        if compress:
            return f"{data['url']}?encoding={encoding}&v={v}&compress=zlib-stream"
        else:
            return f"{data['url']}?encoding={encoding}&v={v}"

    def bulk_upsert_global_application_commands(
        self, commands: Sequence[PartialApplicationCommand]
    ) -> Response[Sequence[ApplicationCommand]]:
        """Overwrite all global application commands with *commands*."""
        return self.request(
            "PUT",
            "/applications/{application_id}/commands",
            {"application_id": self.application_id},
            json=commands,
        )

    def bulk_upsert_guild_application_commands(
        self, guild_id: int, commands: Sequence[PartialApplicationCommand]
    ) -> Response[Sequence[ApplicationCommand]]:
        """Overwrite all of a guild's application commands with *commands*."""
        return self.request(
            "PUT",
            "/applications/{application_id}/guilds/{guild_id}/commands",
            {"application_id": self.application_id, "guild_id": guild_id},
            json=commands,
        )

    def create_interaction_response(
        self, interaction_id: int, token: str, data: InteractionResponse
    ) -> Response[None]:
        """Send the initial response for an interaction."""
        return self.request(
            "POST",
            "/interactions/{interaction_id}/{webhook_token}/callback",
            {"interaction_id": interaction_id, "webhook_token": token},
            json=data,
        )

    def get_original_interaction_response(self, token: str) -> Response[Message]:
        """Fetch the initial interaction response message."""
        return self.request(
            "GET",
            "/interactions/{application_id}/{webhook_token}/messages/@original",
            {
                "application_id": self.application_id,
                "webhook_token": token,
            },
        )

    # TODO proper typing for editing
    def edit_original_interaction_response(
        self, token: str, data: Any
    ) -> Response[Message]:
        """Edit the initial interaction response message."""
        return self.request(
            "PATCH",
            "/interactions/{application_id}/{webhook_token}/messages/@original",
            {
                "application_id": self.application_id,
                "webhook_token": token,
            },
            json=data,
        )

    def delete_original_interaction_response(self, token: str) -> Response[None]:
        """Delete the initial interaction response message."""
        return self.request(
            "DELETE",
            "/interactions/{application_id}/{webhook_token}/messages/@original",
            {
                "application_id": self.application_id,
                "webhook_token": token,
            },
        )

    # TODO proper typing for creating
    def create_followup_message(self, token: str, data: Any) -> Response[Message]:
        """Send a follow-up message for an interaction."""
        return self.request(
            "POST",
            "/interactions/{application_id}/{webhook_token}",
            {
                "application_id": self.application_id,
                "webhook_token": token,
            },
            json=data,
        )

    def get_followup_message(self, token: str, message_id: int) -> Response[Message]:
        """Fetch a follow-up message by ID."""
        return self.request(
            "GET",
            "/interactions/{application_id}/{webhook_token}/messages/{message_id}",
            {
                "application_id": self.application_id,
                "webhook_token": token,
                "message_id": message_id,
            },
        )

    # TODO proper typing for editing
    def edit_followup_message(
        self, token: str, message_id: int, data: Any
    ) -> Response[Message]:
        """Edit a follow-up message by ID."""
        return self.request(
            "PATCH",
            "/interactions/{application_id}/{webhook_token}/messages/{message_id}",
            {
                "application_id": self.application_id,
                "webhook_token": token,
                "message_id": message_id,
            },
            json=data,
        )

    def delete_followup_message(self, token: str, message_id: int) -> Response[None]:
        """Delete a follow-up message by ID."""
        return self.request(
            "DELETE",
            "/interactions/{application_id}/{webhook_token}/messages/{message_id}",
            {
                "application_id": self.application_id,
                "webhook_token": token,
                "message_id": message_id,
            },
        )
| 1.65625 | 2 |
dss_vae/utils/tree_analysis.py | baoy-nlp/DSS-VAE | 37 | 12766345 | from collections import defaultdict
from dss_vae.structs import GlobalNames
from dss_vae.structs import FScore
from dss_vae.structs import PhraseTree
from dss_vae.utils.utility import write_docs
from dss_vae.preprocess import s2b_to_s2t
from dss_vae.preprocess import s2t_check
from dss_vae.preprocess import s2t_fix
from dss_vae.preprocess import s2t_to_tree
def eval_s2t(preds, golds):
    """Score s2t-format predictions against gold sequences.

    Predictions that fail the s2t well-formedness check are scored as the
    dummy tree "(TOP XX)" and counted as errors.

    Returns a tuple of (FScore over the tree pairs, number of malformed
    predictions).
    """
    n_invalid = 0
    gold_trees = []
    pred_trees = []
    for pred_str, gold_str in zip(preds, golds):
        is_valid = s2t_check(pred_str)
        gold_trees.append(s2t_to_tree(s2t_str=gold_str))
        if is_valid:
            pred_trees.append(s2t_to_tree(s2t_str=pred_str))
        else:
            pred_trees.append("(TOP XX)")
            n_invalid += 1
    return FScore.eval_seq_list(gold_seqs=gold_trees, test_seqs=pred_trees), n_invalid
def eval_s2t_robust(preds, golds):
    """Score s2t-format predictions after attempting automatic repair.

    Both predictions and golds are passed through s2t_fix; the number of
    repairs applied to predictions is tracked.  Predictions that still
    fail the well-formedness check are scored as the dummy tree
    "(TOP XX)".

    Returns (FScore, "<bad-count>,avg_fix:<avg repairs per fixed sent>").
    """
    n_invalid = 0
    fix_total = 0.0
    n_fixed_sents = 0.0
    gold_trees = []
    pred_trees = []
    for raw_pred, raw_gold in zip(preds, golds):
        fixed_pred, n_fix = s2t_fix(raw_pred, fm=GlobalNames.get_fm())
        fix_total += n_fix
        if n_fix > 0:
            n_fixed_sents += 1
        fixed_gold, _ = s2t_fix(raw_gold, fm=GlobalNames.get_fm())
        is_valid = s2t_check(fixed_pred)
        gold_trees.append(s2t_to_tree(s2t_str=fixed_gold))
        if is_valid:
            pred_trees.append(s2t_to_tree(s2t_str=fixed_pred))
        else:
            pred_trees.append("(TOP XX)")
            n_invalid += 1
    avg_fix = fix_total / n_fixed_sents if n_fixed_sents > 0 else 0.0
    score = FScore.eval_seq_list(gold_seqs=gold_trees, test_seqs=pred_trees)
    return score, "{},avg_fix:{}".format(n_invalid, avg_fix)
def eval_s2b(preds, golds):
    """Score s2b-format predictions after converting them to s2t form.

    Both predictions and golds go through s2b_to_s2t; the number of
    repairs applied to predictions is tracked.  Predictions that still
    fail the s2t well-formedness check are scored as the dummy tree
    "(TOP XX)".

    Returns (FScore, "<bad-count>,avg_fix:<avg repairs per fixed sent>").
    """
    n_invalid = 0
    fix_total = 0.0
    n_fixed_sents = 0.0
    gold_trees = []
    pred_trees = []
    for raw_pred, raw_gold in zip(preds, golds):
        fixed_pred, n_fix = s2b_to_s2t(raw_pred, fm=GlobalNames.get_fm())
        fix_total += n_fix
        if n_fix > 0:
            n_fixed_sents += 1
        fixed_gold, _ = s2b_to_s2t(raw_gold, fm=GlobalNames.get_fm())
        is_valid = s2t_check(fixed_pred)
        gold_trees.append(s2t_to_tree(s2t_str=fixed_gold))
        if is_valid:
            pred_trees.append(s2t_to_tree(s2t_str=fixed_pred))
        else:
            pred_trees.append("(TOP XX)")
            n_invalid += 1
    avg_fix = fix_total / n_fixed_sents if n_fixed_sents > 0 else 0.0
    score = FScore.eval_seq_list(gold_seqs=gold_trees, test_seqs=pred_trees)
    return score, "{},avg_fix:{}".format(n_invalid, avg_fix)
def eval_file(pred_file, gold_file):
    """Evaluate a prediction file against a gold file with eval_s2t.

    Each file is read as one sequence string per line (newlines kept,
    matching the original line-by-line accumulation).
    """
    with open(pred_file, 'r') as handle:
        pred_lines = handle.readlines()
    with open(gold_file, 'r') as handle:
        gold_lines = handle.readlines()
    return eval_s2t(pred_lines, gold_lines)
def extract_origin_grammar(tree_file, out_file="grammar.out"):
    """Collect grammar-rule counts from a treebank file.

    Writes the distinct rules to *out_file* (one per line) and returns the
    rule -> count mapping.
    """
    grammar_counts = defaultdict(int)
    for tree in PhraseTree.load_treefile(tree_file):
        tree.grammar(grammar_counts)
    write_docs(fname=out_file, docs=list(grammar_counts.keys()))
    return grammar_counts
def extract_binary_grammar(tree_file, out_file="grammar.out"):
    """Collect grammar-rule counts from a treebank file after binarization.

    Each tree is binarized in place before its rules are counted.  Writes
    the distinct rules to *out_file* (one per line) and returns the
    rule -> count mapping.
    """
    grammar_counts = defaultdict(int)
    for tree in PhraseTree.load_treefile(tree_file):
        tree.binarize()
        tree.grammar(grammar_counts)
    write_docs(fname=out_file, docs=list(grammar_counts.keys()))
    return grammar_counts
def evaluate_coverage(dict_a, dict_b):
    """Return the percentage of *dict_b*'s keys that also appear in *dict_a*.

    :param dict_a: reference rule -> count mapping (e.g. from training data)
    :param dict_b: query rule -> count mapping (e.g. from dev/test data)
    :return: coverage percentage in [0, 100]; 0.0 for an empty ``dict_b``
        (the original raised ZeroDivisionError in that case)
    """
    if not dict_b:
        return 0.0
    hits = sum(1.0 for item in dict_b if item in dict_a)
    return hits * 100.0 / len(dict_b)
def evaluate_using_ratio(dict_a, dict_b):
    """Return the count-weighted percentage of *dict_b* covered by *dict_a*.

    Each key of ``dict_b`` contributes its count (value) to the total
    mass; keys also present in ``dict_a`` contribute to the matched mass.

    :param dict_a: reference rule -> count mapping
    :param dict_b: query rule -> count mapping
    :return: matched mass as a percentage of total mass; 0.0 when
        ``dict_b`` is empty or its counts sum to zero (the original raised
        ZeroDivisionError in those cases)
    """
    total = float(sum(dict_b.values()))
    if total == 0:
        return 0.0
    matched = float(sum(val for item, val in dict_b.items() if item in dict_a))
    return matched * 100.0 / total
def evaluate_grammar_coverage(train_file, dev_file, test_file, grammar_type='.binary'):
    """Print grammar coverage / usage-ratio of dev and test vs. train.

    *grammar_type* selects the extractor ('.binary' binarizes trees first)
    and is also used as the suffix for the written grammar files.
    """
    extractor = extract_binary_grammar if grammar_type == '.binary' else extract_origin_grammar
    train_dict = extractor(train_file, train_file + grammar_type)
    dev_dict = extractor(dev_file, dev_file + grammar_type)
    test_dict = extractor(test_file, test_file + grammar_type)
    print("cover dev:{}".format(evaluate_coverage(train_dict, dev_dict)))
    print("cover test:{}".format(evaluate_coverage(train_dict, test_dict)))
    print("ratio dev:{}".format(evaluate_using_ratio(train_dict, dev_dict)))
    print("ratio test:{}".format(evaluate_using_ratio(train_dict, test_dict)))
| 2.21875 | 2 |
rsi/rsi/report/summary_pembelian/summary_pembelian.py | bobzz-zone/rsi | 0 | 12766346 | <reponame>bobzz-zone/rsi
# Copyright (c) 2013, myme and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
    """Frappe report entry point: daily purchase-invoice summary.

    Returns (columns, data) where each data row is
    [posting_date, weekday name, invoice count, grand total].

    The previous implementation interpolated the user-supplied filter
    values straight into the SQL string, which allowed SQL injection; all
    values are now passed as query parameters.
    """
    filters = filters or {}
    columns = ["Tanggal:Date:150", "Hari:Data:150", "Faktur:Int:100", "Jumlah:Currency:300"]
    conditions = ""
    values = {
        "from_date": filters.get("from_date"),
        "to_date": filters.get("to_date"),
    }
    if filters.get("warehouse"):
        conditions += " and a.warehouse=%(warehouse)s"
        values["warehouse"] = filters.get("warehouse")
    if filters.get("supplier"):
        conditions += " and a.supplier=%(supplier)s"
        values["supplier"] = filters.get("supplier")
    if filters.get("remark"):
        conditions += " and a.remarks like %(remark)s"
        values["remark"] = "%{}%".format(filters.get("remark"))
    # NOTE: date_format's "%W" (weekday name) is escaped as "%%W" because
    # the query is now parameterized and "%" would be treated as a
    # placeholder marker otherwise.
    data = frappe.db.sql(
        """select a.posting_date, date_format(a.posting_date, "%%W"),
            count(1) as "qty", sum(a.base_grand_total) as "omset"
        from (select p.posting_date, p.base_grand_total, pi.warehouse, p.remarks,
                p.supplier, p.is_return, p.docstatus
            from `tabPurchase Invoice` p
            join `tabPurchase Invoice Item` pi on p.name = pi.parent
            group by p.name) a
        where a.docstatus = 1 and a.is_return = 0{}
            and (a.posting_date between %(from_date)s and %(to_date)s)
        group by a.posting_date """.format(conditions),
        values,
        as_list=1,
    )
    return columns, data
| 1.921875 | 2 |
AET/imagenet/config/ImageNet_Unsupervised.py | pjwu1997/teil_project | 114 | 12766347 | <reponame>pjwu1997/teil_project<filename>AET/imagenet/config/ImageNet_Unsupervised.py
# Unsupervised AET training configuration (ImageNet, AlexNet backbone).
batch_size = 192 * 4  # 768 samples per step

config = {}

# Data-loader options for the training split.
data_train_opt = {
    'batch_size': batch_size,
    'unsupervised': True,
    'epoch_size': None,
    'random_sized_crop': False,
    'dataset_name': 'imagenet',
    'split': 'train',
}

# Data-loader options for the validation split.
data_test_opt = {
    'batch_size': batch_size,
    'unsupervised': True,
    'epoch_size': None,
    'random_sized_crop': False,
    'dataset_name': 'imagenet',
    'split': 'val',
}

config['data_train_opt'] = data_train_opt
config['data_test_opt'] = data_test_opt
config['max_num_epochs'] = 200

# Network architecture options.
net_opt = {
    'num_classes': 8,
    'num_stages': 4,
}

# SGD with a step learning-rate schedule (LUT of (epoch, lr) pairs).
net_optim_params = {
    'optim_type': 'sgd',
    'lr': 0.01,
    'momentum': 0.9,
    'weight_decay': 5e-4,
    'nesterov': True,
    'LUT_lr': [(100, 0.01), (150, 0.001), (200, 0.0001)],
}

networks = {
    'model': {
        'def_file': 'architectures/AlexNet.py',
        'pretrained': None,
        'opt': net_opt,
        'optim_params': net_optim_params,
    },
}
config['networks'] = networks

criterions = {
    'loss': {'ctype': 'MSELoss', 'opt': True},
}
config['criterions'] = criterions
config['algorithm_type'] = 'UnsupervisedModel'
| 1.945313 | 2 |
01 - Basics/00-install.py | python-demo-codes/basics | 2 | 12766348 | <reponame>python-demo-codes/basics
# HEAD
# Python Basics
# DESCRIPTION
# Describe the basic setup, and development environment of Python
# RESOURCES
#
# ----------PYTHON----------
# PYTHON ORG
# https://www.python.org/
# ANACONDA
# https://www.anaconda.com/
# ----------IDE-------------
# JUPYTER
# python3 -m pip install --upgrade pip
# python3 -m pip install jupyter
# PYCHARM and VSCODE
# https://www.jetbrains.com/pycharm/
# sudo snap install <pycharm-professional,·pycharm-community,or·pycharm-educational> --classic
# https://code.visualstudio.com/
# Install executable
# ----------ENVIRONMENT-------------
# https://docs.python.org/3/using/cmdline.html
# ----------PACKAGES-------------
# SQLALCHEMY and SQLALCHEMY-UTILS
# python3 -m pip install --upgrade pip
# python3 -m pip install sqlalchemy, sqlalchemy-utils, pymssql, pymysql, pyodbc
# MONGODB
# python3 -m pip install --upgrade pip
# python3 -m pip install mongodb, pymongo
# DJANGO
# python3 -m pip install --upgrade pip
# python3 -m pip install django
# FLASK
# python3 -m pip install --upgrade pip
# python3 -m pip install flask
# BCRYPT
# python3 -m pip install --upgrade pip
# python3 -m pip install bcrypt
# BEAUTIFULSOUP4
# python3 -m pip install --upgrade pip
# python3 -m pip install beautifulsoup4
# NUMPY
# python3 -m pip install --upgrade pip
# python3 -m pip install numpy
# NUMPY-BASE
# python3 -m pip install --upgrade pip
# python3 -m pip install numpy-base
# SCIPY and STATSMODELS
# python3 -m pip install --upgrade pip
# python3 -m pip install scipy, statsmodels
# MATPLOTLIB (PyPI package name is "matplotlib")
#   python3 -m pip install --upgrade pip
#   python3 -m pip install matplotlib
# PANDAS
# python3 -m pip install --upgrade pip
# python3 -m pip install pandas
# PANDAS-DATAREADER
# python3 -m pip install --upgrade pip
# python3 -m pip install pandas-datareader
# PANDAS-PROFILING
# python3 -m pip install --upgrade pip
# python3 -m pip install pandas-profiling
# PANDAS-SQL
# python3 -m pip install --upgrade pip
# python3 -m pip install pandassql
# PYTABLES - Brings together Python, HDF5 and NumPy to easily handle large amounts of data.
# python3 -m pip install --upgrade pip
# python3 -m pip install pytables
# PACKAGING
# python3 -m pip install --upgrade pip
# python3 -m pip install packaging
# PYOPENSSL
# python3 -m pip install --upgrade pip
# python3 -m pip install pyopenssl
# PYQT
# python3 -m pip install --upgrade pip
# python3 -m pip install pyqt
# PLOTLY
# python3 -m pip install --upgrade pip
# python3 -m pip install plotly
# CYTHON
# python3 -m pip install --upgrade pip
# python3 -m pip install cython
# PYTEST
# python3 -m pip install --upgrade pip
# python3 -m pip install pytest
# pytest 5.0.1 Simple and powerful testing with Python. / MIT
# pytest-arraydiff 0.3 pytest plugin to help with comparing array output from tests / BSD
# pytest-astropy 0.5.0 Meta-package containing dependencies for testing Astropy / BSD
# pytest-asyncio 0.6.0 Pytest support for asyncio / Apache 2.0
# pytest-cache 1.0 pytest plugin with mechanisms for caching across test runs / MIT
# pytest-cov 2.7.1 Pytest plugin for measuring coverage / MIT
# pytest-doctestplus 0.3.0 Pytest plugin with advanced doctest features / BSD
# pytest-mock 1.10.4 Thin-wrapper around the mock package for easier use with py.test / MIT
# pytest-openfiles 0.3.2 Pytest plugin for detecting inadvertent open file handles / BSD
# pytest-pep8 1.0.6 py.test plugin for efficiently checking PEP8 compliance / MIT
# pytest-remotedata 0.3.1 Pytest plugin for controlling remote data access / BSD
# pytest-runner 2.11.1 Invoke py.test as distutils command with dependency resolution. / MIT
# pytest-timeout 1.3.3 This is a plugin which will terminate tests after a certain timeout. / MIT
# pytest-xdist 1.28.0 py.test xdist plugin for distributed testing and loop-on-failing modes / MIT
# PYTHON-DATEUTIL
# python3 -m pip install --upgrade pip
# python3 -m pip install python-dateutil
# SCIKIT-LEARN and SCIKIT-IMAGE and SCIKIT-RF
# python3 -m pip install --upgrade pip
# python3 -m pip install scikit-bio, scikit-learn, scikit-image
# SEABORN
# python3 -m pip install --upgrade pip
# python3 -m pip install seaborn
# SIMPLEJSON
# python3 -m pip install --upgrade pip
# python3 -m pip install simplejson
# WORKER-POOL
# python3 -m pip install --upgrade pip
# python3 -m pip install worker-pool
# VIRTUALENV
# python3 -m pip install --upgrade pip
# python3 -m pip install virtualenv
# WHEEL - https://pythonwheels.com/ https://pypi.org/
# python3 -m pip install --upgrade pip
# python3 -m pip install wheel
# WXPYTHON and TKINTER
# python3 -m pip install --upgrade pip
# python3 -m pip install wxpython, tkinter
# ANACONDA PACKAGES
# https://docs.anaconda.com/anaconda/packages/py3.7_linux-64/
| 2.671875 | 3 |
models/fpn_global_local_fmreg_ensemble.py | yinchimaoliang/GLNet | 119 | 12766349 | from .resnet import resnet50
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
class fpn_module_global(nn.Module):
    """FPN decoder head for the global branch.

    Builds a top-down feature pyramid from backbone feature maps c2-c5
    (with 256/512/1024/2048 input channels, per the conv definitions) and
    classifies the channel-wise concatenation of the four pyramid levels.
    Every layer has an ``*_ext`` twin with doubled input channels: when
    the ``*_ext`` arguments of ``forward`` are given (features fed back
    from the local branch), the twins are used so local and global
    features are fused at each stage.
    """
    def __init__(self, numClass):
        super(fpn_module_global, self).__init__()
        # Upsampling configuration shared by all F.interpolate calls.
        self._up_kwargs = {'mode': 'bilinear'}
        # Top layer
        self.toplayer = nn.Conv2d(2048, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
        # Lateral layers
        self.latlayer1 = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(256, 256, kernel_size=1, stride=1, padding=0)
        # Smooth layers
        self.smooth1_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
        self.smooth1_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
        # Classify layers
        self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)
        # Local2Global: double #channels ####################################
        # These *_ext twins accept the global feature concatenated with the
        # corresponding local-branch feature (hence doubled in_channels).
        # Top layer
        self.toplayer_ext = nn.Conv2d(2048*2, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
        # Lateral layers
        self.latlayer1_ext = nn.Conv2d(1024*2, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2_ext = nn.Conv2d(512*2, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3_ext = nn.Conv2d(256*2, 256, kernel_size=1, stride=1, padding=0)
        # Smooth layers
        self.smooth1_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1_ext = nn.Conv2d(256*2, 256, kernel_size=3, stride=1, padding=1)
        self.smooth1_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2_ext = nn.Conv2d(256*2, 128, kernel_size=3, stride=1, padding=1)
        self.smooth = nn.Conv2d(128*4*2, 128*4, kernel_size=3, stride=1, padding=1)
    def _concatenate(self, p5, p4, p3, p2):
        # Upsample p5/p4/p3 to p2's spatial size, then concat on channels.
        _, _, H, W = p2.size()
        p5 = F.interpolate(p5, size=(H, W), **self._up_kwargs)
        p4 = F.interpolate(p4, size=(H, W), **self._up_kwargs)
        p3 = F.interpolate(p3, size=(H, W), **self._up_kwargs)
        return torch.cat([p5, p4, p3, p2], dim=1)
    def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.
        Args:
          x: (Variable) top feature map to be upsampled.
          y: (Variable) lateral feature map.
        Returns:
          (Variable) added feature map.
        Note in PyTorch, when input size is odd, the upsampled feature map
        with `F.interpolate(..., scale_factor=2, mode='nearest')`
        maybe not equal to the lateral feature map size.
        e.g.
        original input size: [N,_,15,15] ->
        conv2d feature map size: [N,_,8,8] ->
        upsampled feature map size: [N,_,16,16]
        So we choose bilinear upsample which supports arbitrary output sizes.
        '''
        _, _, H, W = y.size()
        return F.interpolate(x, size=(H, W), **self._up_kwargs) + y
    def forward(self, c2, c3, c4, c5, c2_ext=None, c3_ext=None, c4_ext=None, c5_ext=None, ps0_ext=None, ps1_ext=None, ps2_ext=None):
        # When *_ext tensors are provided, they are concatenated with the
        # corresponding global features and the doubled-input *_ext layers
        # are used instead of the plain ones.  Returns
        # (logits, ps0, ps1, ps2, ps3) so intermediate pyramids can be
        # reused by the caller.
        # Top-down
        if c5_ext is None:
            p5 = self.toplayer(c5)
            p4 = self._upsample_add(p5, self.latlayer1(c4))
            p3 = self._upsample_add(p4, self.latlayer2(c3))
            p2 = self._upsample_add(p3, self.latlayer3(c2))
        else:
            p5 = self.toplayer_ext(torch.cat((c5, c5_ext), dim=1))
            p4 = self._upsample_add(p5, self.latlayer1_ext(torch.cat((c4, c4_ext), dim=1)))
            p3 = self._upsample_add(p4, self.latlayer2_ext(torch.cat((c3, c3_ext), dim=1)))
            p2 = self._upsample_add(p3, self.latlayer3_ext(torch.cat((c2, c2_ext), dim=1)))
        ps0 = [p5, p4, p3, p2]
        # Smooth
        if ps0_ext is None:
            p5 = self.smooth1_1(p5)
            p4 = self.smooth2_1(p4)
            p3 = self.smooth3_1(p3)
            p2 = self.smooth4_1(p2)
        else:
            p5 = self.smooth1_1_ext(torch.cat((p5, ps0_ext[0]), dim=1))
            p4 = self.smooth2_1_ext(torch.cat((p4, ps0_ext[1]), dim=1))
            p3 = self.smooth3_1_ext(torch.cat((p3, ps0_ext[2]), dim=1))
            p2 = self.smooth4_1_ext(torch.cat((p2, ps0_ext[3]), dim=1))
        ps1 = [p5, p4, p3, p2]
        if ps1_ext is None:
            p5 = self.smooth1_2(p5)
            p4 = self.smooth2_2(p4)
            p3 = self.smooth3_2(p3)
            p2 = self.smooth4_2(p2)
        else:
            p5 = self.smooth1_2_ext(torch.cat((p5, ps1_ext[0]), dim=1))
            p4 = self.smooth2_2_ext(torch.cat((p4, ps1_ext[1]), dim=1))
            p3 = self.smooth3_2_ext(torch.cat((p3, ps1_ext[2]), dim=1))
            p2 = self.smooth4_2_ext(torch.cat((p2, ps1_ext[3]), dim=1))
        ps2 = [p5, p4, p3, p2]
        # Classify
        if ps2_ext is None:
            ps3 = self._concatenate(p5, p4, p3, p2)
            output = self.classify(ps3)
        else:
            p = self._concatenate(
                torch.cat((p5, ps2_ext[0]), dim=1),
                torch.cat((p4, ps2_ext[1]), dim=1),
                torch.cat((p3, ps2_ext[2]), dim=1),
                torch.cat((p2, ps2_ext[3]), dim=1)
            )
            ps3 = self.smooth(p)
            output = self.classify(ps3)
        return output, ps0, ps1, ps2, ps3
class fpn_module_local(nn.Module):
    """FPN decoder head for the local branch.

    Mirrors fpn_module_global, but every layer takes doubled input
    channels (fold = 2) because each local feature map is always
    concatenated with the element-0 tensor of the corresponding global
    ``*_ext`` argument, bilinearly resized to the local map's spatial
    size, before the convolution.
    """
    def __init__(self, numClass):
        super(fpn_module_local, self).__init__()
        # Upsampling configuration shared by all F.interpolate calls.
        self._up_kwargs = {'mode': 'bilinear'}
        # Top layer
        # fold doubles in_channels: local features are always fused with
        # the matching global-branch features.
        fold = 2
        self.toplayer = nn.Conv2d(2048 * fold, 256, kernel_size=1, stride=1, padding=0) # Reduce channels
        # Lateral layers [C]
        self.latlayer1 = nn.Conv2d(1024 * fold, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(512 * fold, 256, kernel_size=1, stride=1, padding=0)
        self.latlayer3 = nn.Conv2d(256 * fold, 256, kernel_size=1, stride=1, padding=0)
        # Smooth layers
        # ps0
        self.smooth1_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
        self.smooth2_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
        self.smooth3_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
        self.smooth4_1 = nn.Conv2d(256 * fold, 256, kernel_size=3, stride=1, padding=1)
        # ps1
        self.smooth1_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
        self.smooth2_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
        self.smooth3_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
        self.smooth4_2 = nn.Conv2d(256 * fold, 128, kernel_size=3, stride=1, padding=1)
        # ps2 is concatenation
        # Classify layers
        self.smooth = nn.Conv2d(128*4*fold, 128*4, kernel_size=3, stride=1, padding=1)
        self.classify = nn.Conv2d(128*4, numClass, kernel_size=3, stride=1, padding=1)
    def _concatenate(self, p5, p4, p3, p2):
        # Upsample p5/p4/p3 to p2's spatial size, then concat on channels.
        _, _, H, W = p2.size()
        p5 = F.interpolate(p5, size=(H, W), **self._up_kwargs)
        p4 = F.interpolate(p4, size=(H, W), **self._up_kwargs)
        p3 = F.interpolate(p3, size=(H, W), **self._up_kwargs)
        return torch.cat([p5, p4, p3, p2], dim=1)
    def _upsample_add(self, x, y):
        '''Upsample and add two feature maps.
        Args:
          x: (Variable) top feature map to be upsampled.
          y: (Variable) lateral feature map.
        Returns:
          (Variable) added feature map.
        Note in PyTorch, when input size is odd, the upsampled feature map
        with `F.interpolate(..., scale_factor=2, mode='nearest')`
        maybe not equal to the lateral feature map size.
        e.g.
        original input size: [N,_,15,15] ->
        conv2d feature map size: [N,_,8,8] ->
        upsampled feature map size: [N,_,16,16]
        So we choose bilinear upsample which supports arbitrary output sizes.
        '''
        _, _, H, W = y.size()
        return F.interpolate(x, size=(H, W), **self._up_kwargs) + y
    def forward(self, c2, c3, c4, c5, c2_ext, c3_ext, c4_ext, c5_ext, ps0_ext, ps1_ext, ps2_ext):
        # Each *_ext argument is indexed at [0]; that tensor is resized to
        # the local feature's spatial size and concatenated before the
        # corresponding (doubled-input) layer is applied.  Returns
        # (logits, ps0, ps1, ps2, ps3).
        # Top-down
        p5 = self.toplayer(torch.cat([c5] + [F.interpolate(c5_ext[0], size=c5.size()[2:], **self._up_kwargs)], dim=1))
        p4 = self._upsample_add(p5, self.latlayer1(torch.cat([c4] + [F.interpolate(c4_ext[0], size=c4.size()[2:], **self._up_kwargs)], dim=1)))
        p3 = self._upsample_add(p4, self.latlayer2(torch.cat([c3] + [F.interpolate(c3_ext[0], size=c3.size()[2:], **self._up_kwargs)], dim=1)))
        p2 = self._upsample_add(p3, self.latlayer3(torch.cat([c2] + [F.interpolate(c2_ext[0], size=c2.size()[2:], **self._up_kwargs)], dim=1)))
        ps0 = [p5, p4, p3, p2]
        # Smooth
        p5 = self.smooth1_1(torch.cat([p5] + [F.interpolate(ps0_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1))
        p4 = self.smooth2_1(torch.cat([p4] + [F.interpolate(ps0_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1))
        p3 = self.smooth3_1(torch.cat([p3] + [F.interpolate(ps0_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1))
        p2 = self.smooth4_1(torch.cat([p2] + [F.interpolate(ps0_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1))
        ps1 = [p5, p4, p3, p2]
        p5 = self.smooth1_2(torch.cat([p5] + [F.interpolate(ps1_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1))
        p4 = self.smooth2_2(torch.cat([p4] + [F.interpolate(ps1_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1))
        p3 = self.smooth3_2(torch.cat([p3] + [F.interpolate(ps1_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1))
        p2 = self.smooth4_2(torch.cat([p2] + [F.interpolate(ps1_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1))
        ps2 = [p5, p4, p3, p2]
        # Classify
        # use ps2_ext
        ps3 = self._concatenate(
            torch.cat([p5] + [F.interpolate(ps2_ext[0][0], size=p5.size()[2:], **self._up_kwargs)], dim=1),
            torch.cat([p4] + [F.interpolate(ps2_ext[1][0], size=p4.size()[2:], **self._up_kwargs)], dim=1),
            torch.cat([p3] + [F.interpolate(ps2_ext[2][0], size=p3.size()[2:], **self._up_kwargs)], dim=1),
            torch.cat([p2] + [F.interpolate(ps2_ext[3][0], size=p2.size()[2:], **self._up_kwargs)], dim=1)
        )
        ps3 = self.smooth(ps3)
        output = self.classify(ps3)
        return output, ps0, ps1, ps2, ps3
class fpn(nn.Module):
    """Two-branch FPN segmentation network (GLNet-style).

    Combines a downsampled *global* view of the image with high-resolution
    *local* patches. Each branch has its own ResNet-50 backbone and FPN head;
    intermediate feature maps are exchanged between the branches — global
    features are cropped to each patch's region, and local patch features are
    merged back into whole-image maps — depending on the training mode passed
    to ``forward``.
    """
    def __init__(self, numClass):
        """Build both branches and initialize all feature-map caches.

        :param numClass: number of segmentation classes.
        """
        super(fpn, self).__init__()
        # shared keyword args for every F.interpolate call in this class
        self._up_kwargs = {'mode': 'bilinear'}
        # Res net
        self.resnet_global = resnet50(True)
        self.resnet_local = resnet50(True)
        # fpn module
        self.fpn_global = fpn_module_global(numClass)
        self.fpn_local = fpn_module_local(numClass)
        # --- caches, mirrored in clear_cache() ---
        # *_g: global-branch feature maps / output for the current global image
        self.c2_g = None; self.c3_g = None; self.c4_g = None; self.c5_g = None; self.output_g = None
        self.ps0_g = None; self.ps1_g = None; self.ps2_g = None; self.ps3_g = None
        # *_l: per-image collections of merged local feature maps
        # (lists while collecting, concatenated to tensors once a batch is full)
        self.c2_l = []; self.c3_l = []; self.c4_l = []; self.c5_l = [];
        self.ps00_l = []; self.ps01_l = []; self.ps02_l = []; self.ps03_l = [];
        self.ps10_l = []; self.ps11_l = []; self.ps12_l = []; self.ps13_l = [];
        self.ps20_l = []; self.ps21_l = []; self.ps22_l = []; self.ps23_l = [];
        self.ps0_l = None; self.ps1_l = None; self.ps2_l = None
        self.ps3_l = []#; self.output_l = []
        # *_b: accumulation buffers for the image currently being assembled
        # patch-by-patch inside collect_local_fm()
        self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
        self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
        self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
        self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
        self.ps3_b = []#; self.output_b = []
        # number of patches of the current global image processed so far;
        # wraps to 0 (modulo n_patch) when a whole image has been covered
        self.patch_n = 0
        # regularization loss pulling local ps3 features toward the global ones
        self.mse = nn.MSELoss()
        # fuses concatenated local+global ps3 features (128*4 channels each)
        # into the final per-class map
        self.ensemble_conv = nn.Conv2d(128*4 * 2, numClass, kernel_size=3, stride=1, padding=1)
        nn.init.normal_(self.ensemble_conv.weight, mean=0, std=0.01)
        # init fpn
        for m in self.fpn_global.children():
            if hasattr(m, 'weight'): nn.init.normal_(m.weight, mean=0, std=0.01)
            if hasattr(m, 'bias'): nn.init.constant_(m.bias, 0)
        for m in self.fpn_local.children():
            if hasattr(m, 'weight'): nn.init.normal_(m.weight, mean=0, std=0.01)
            if hasattr(m, 'bias'): nn.init.constant_(m.bias, 0)
    def clear_cache(self):
        """Reset every cached feature map and buffer to its initial state.

        Call between images/epochs so stale global or local features are not
        reused. NOTE(review): unlike __init__, this also (re)creates
        ``self.output_l`` / ``self.output_b`` — they appear to be vestiges of
        the commented-out output collection; confirm they are unused.
        """
        self.c2_g = None; self.c3_g = None; self.c4_g = None; self.c5_g = None; self.output_g = None
        self.ps0_g = None; self.ps1_g = None; self.ps2_g = None; self.ps3_g = None
        self.c2_l = []; self.c3_l = []; self.c4_l = []; self.c5_l = [];
        self.ps00_l = []; self.ps01_l = []; self.ps02_l = []; self.ps03_l = [];
        self.ps10_l = []; self.ps11_l = []; self.ps12_l = []; self.ps13_l = [];
        self.ps20_l = []; self.ps21_l = []; self.ps22_l = []; self.ps23_l = [];
        self.ps0_l = None; self.ps1_l = None; self.ps2_l = None
        self.ps3_l = []; self.output_l = []
        self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
        self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
        self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
        self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
        self.ps3_b = []; self.output_b = []
        self.patch_n = 0
    def _sample_grid(self, fm, bbox, sampleSize):
        """Sample rectangular sub-regions of a feature map via grid sampling.

        :param fm: tensor (b, c, h, w), the global feature map.
        :param bbox: list of b arrays (x1, y1, x2, y2) in [0, 1]; (x1, y1) is
            the top-left and (x2, y2) the bottom-right corner of the box; x is
            along the width dimension, y along the height dimension.
        :param sampleSize: (oH, oW) number of sample points in height / width.
            NOTE(review): each dimension must be > 1 or the gridMap division
            by (size - 1) fails — confirm callers guarantee this.
        :return: tensor (b, c, oH, oW) of sampled values.
        """
        b, c, h, w = fm.shape
        b_bbox = len(bbox)
        # grid_sample expects normalized coordinates in [-1, 1]
        bbox = [x*2 - 1 for x in bbox] # range transform
        # broadcast a single feature map across the whole bbox batch
        if b != b_bbox and b == 1:
            fm = torch.cat([fm,]*b_bbox, dim=0)
        grid = np.zeros((b_bbox,) + sampleSize + (2,), dtype=np.float32)
        # unit grid of (x, y) fractions in [0, 1], later scaled into each bbox
        gridMap = np.array([[(cnt_w/(sampleSize[1]-1), cnt_h/(sampleSize[0]-1)) for cnt_w in range(sampleSize[1])] for cnt_h in range(sampleSize[0])])
        for cnt_b in range(b_bbox):
            grid[cnt_b, :, :, 0] = bbox[cnt_b][0] + (bbox[cnt_b][2] - bbox[cnt_b][0])*gridMap[:, :, 0]
            grid[cnt_b, :, :, 1] = bbox[cnt_b][1] + (bbox[cnt_b][3] - bbox[cnt_b][1])*gridMap[:, :, 1]
        grid = torch.from_numpy(grid).cuda()
        return F.grid_sample(fm, grid)
    def _crop_global(self, f_global, top_lefts, ratio):
        '''
        Crop the patch-sized sub-region of a global feature map for each patch.

        f_global: (1, c, H, W) global feature map (batch dim 0 is always used).
        top_lefts: [(top, left)] * b, normalized in [0, 1] relative to (H, W).
        ratio: (rh, rw) patch size as a fraction of the global image size.
        returns: [crop] — a one-element list holding a (b, c, h, w) tensor,
        wrapped in a list so callers can concatenate via torch.cat.
        '''
        _, c, H, W = f_global.size()
        b = len(top_lefts)
        # patch extent in feature-map pixels
        h, w = int(np.round(H * ratio[0])), int(np.round(W * ratio[1]))
        # bbox = [ np.array([left, top, left + ratio, top + ratio]) for (top, left) in top_lefts ]
        # crop = self._sample_grid(f_global, bbox, (H, W))
        crop = []
        for i in range(b):
            top, left = int(np.round(top_lefts[i][0] * H)), int(np.round(top_lefts[i][1] * W))
            # # global's sub-region & upsample
            # f_global_patch = F.interpolate(f_global[0:1, :, top:top+h, left:left+w], size=(h, w), mode='bilinear')
            f_global_patch = f_global[0:1, :, top:top+h, left:left+w]
            crop.append(f_global_patch[0])
        crop = torch.stack(crop, dim=0) # stack into mini-batch
        return [crop] # return as a list for easy to torch.cat
    def _merge_local(self, f_local, merge, f_global, top_lefts, oped, ratio, template):
        '''
        merge feature maps from local patches, and finally to a whole image's feature map (on cuda)
        f_local: a sub_batch_size of patch's feature map
        merge: (1, c, H, W) running accumulator, or None on the first call
        f_global: global feature map whose size (H, W) the merge must match
        top_lefts: normalized (top, left) of every patch of the image
        oped: [start, end) index range of the patches contained in f_local
        ratio: (rh, rw) patch size as a fraction of the global size
        template: per-pixel overlap counts; divides the sum once all patches
        of the image have been added, turning overlaps into averages
        '''
        b, _, _, _ = f_local.size()
        _, c, H, W = f_global.size() # match global feature size
        if merge is None:
            merge = torch.zeros((1, c, H, W)).cuda()
        h, w = int(np.round(H * ratio[0])), int(np.round(W * ratio[1]))
        for i in range(b):
            index = oped[0] + i
            top, left = int(np.round(H * top_lefts[index][0])), int(np.round(W * top_lefts[index][1]))
            # resize each patch feature to its footprint and add in place;
            # overlapping regions accumulate and are averaged via template below
            merge[:, :, top:top+h, left:left+w] += F.interpolate(f_local[i:i+1], size=(h, w), **self._up_kwargs)
        # last sub-batch of this image: normalize by the overlap counts
        if oped[1] >= len(top_lefts):
            template = F.interpolate(template, size=(H, W), **self._up_kwargs)
            template = template.expand_as(merge)
            # template = Variable(template).cuda()
            merge /= template
        return merge
    def ensemble(self, f_local, f_global):
        """Fuse local and global ps3 features into the final class map."""
        return self.ensemble_conv(torch.cat((f_local, f_global), dim=1))
    def collect_local_fm(self, image_global, patches, ratio, top_lefts, oped, batch_size, global_model=None, template=None, n_patch_all=None):
        '''
        Run the (frozen) local branch on one sub-batch of patches and fold the
        resulting feature maps into whole-image maps, collecting them per image
        until `batch_size` images are complete. Used to prepare the local
        features consumed by forward(mode=3).

        patches: 1 patch (sub-batch of patches of a single global image)
        top_lefts: all top-left
        oped: [start, end) — index range of these patches within top_lefts
        returns: (self.ps3_l, output) — the collected ps3 list and this
        sub-batch's raw local output
        '''
        with torch.no_grad():
            # new image: (re)compute the global branch once per image
            if self.patch_n == 0:
                self.c2_g, self.c3_g, self.c4_g, self.c5_g = global_model.module.resnet_global.forward(image_global)
                self.output_g, self.ps0_g, self.ps1_g, self.ps2_g, self.ps3_g = global_model.module.fpn_global.forward(self.c2_g, self.c3_g, self.c4_g, self.c5_g)
            # self.output_g = F.interpolate(self.output_g, image_global.size()[2:], mode='nearest')
            self.patch_n += patches.size()[0]
            self.patch_n %= n_patch_all
            # local branch is used purely for feature extraction here
            self.resnet_local.eval()
            self.fpn_local.eval()
            c2, c3, c4, c5 = self.resnet_local.forward(patches)
            # global's 1x patch cat
            output, ps0, ps1, ps2, ps3 = self.fpn_local.forward(
                c2, c3, c4, c5,
                self._crop_global(self.c2_g, top_lefts[oped[0]:oped[1]], ratio),
                c3_ext=self._crop_global(self.c3_g, top_lefts[oped[0]:oped[1]], ratio),
                c4_ext=self._crop_global(self.c4_g, top_lefts[oped[0]:oped[1]], ratio),
                c5_ext=self._crop_global(self.c5_g, top_lefts[oped[0]:oped[1]], ratio),
                ps0_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps0_g ],
                ps1_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps1_g ],
                ps2_ext=[ self._crop_global(f, top_lefts[oped[0]:oped[1]], ratio) for f in self.ps2_g ]
            )
            # output = F.interpolate(output, patches.size()[2:], mode='nearest')
            # accumulate this sub-batch of patch features into the *_b image buffers
            self.c2_b = self._merge_local(c2, self.c2_b, self.c2_g, top_lefts, oped, ratio, template)
            self.c3_b = self._merge_local(c3, self.c3_b, self.c3_g, top_lefts, oped, ratio, template)
            self.c4_b = self._merge_local(c4, self.c4_b, self.c4_g, top_lefts, oped, ratio, template)
            self.c5_b = self._merge_local(c5, self.c5_b, self.c5_g, top_lefts, oped, ratio, template)
            self.ps00_b = self._merge_local(ps0[0], self.ps00_b, self.ps0_g[0], top_lefts, oped, ratio, template)
            self.ps01_b = self._merge_local(ps0[1], self.ps01_b, self.ps0_g[1], top_lefts, oped, ratio, template)
            self.ps02_b = self._merge_local(ps0[2], self.ps02_b, self.ps0_g[2], top_lefts, oped, ratio, template)
            self.ps03_b = self._merge_local(ps0[3], self.ps03_b, self.ps0_g[3], top_lefts, oped, ratio, template)
            self.ps10_b = self._merge_local(ps1[0], self.ps10_b, self.ps1_g[0], top_lefts, oped, ratio, template)
            self.ps11_b = self._merge_local(ps1[1], self.ps11_b, self.ps1_g[1], top_lefts, oped, ratio, template)
            self.ps12_b = self._merge_local(ps1[2], self.ps12_b, self.ps1_g[2], top_lefts, oped, ratio, template)
            self.ps13_b = self._merge_local(ps1[3], self.ps13_b, self.ps1_g[3], top_lefts, oped, ratio, template)
            self.ps20_b = self._merge_local(ps2[0], self.ps20_b, self.ps2_g[0], top_lefts, oped, ratio, template)
            self.ps21_b = self._merge_local(ps2[1], self.ps21_b, self.ps2_g[1], top_lefts, oped, ratio, template)
            self.ps22_b = self._merge_local(ps2[2], self.ps22_b, self.ps2_g[2], top_lefts, oped, ratio, template)
            self.ps23_b = self._merge_local(ps2[3], self.ps23_b, self.ps2_g[3], top_lefts, oped, ratio, template)
            self.ps3_b.append(ps3.cpu())
            # self.output_b.append(output.cpu()) # each output is 1, 7, h, w
            if self.patch_n == 0:
                # merged all patches into an image
                self.c2_l.append(self.c2_b); self.c3_l.append(self.c3_b); self.c4_l.append(self.c4_b); self.c5_l.append(self.c5_b);
                self.ps00_l.append(self.ps00_b); self.ps01_l.append(self.ps01_b); self.ps02_l.append(self.ps02_b); self.ps03_l.append(self.ps03_b)
                self.ps10_l.append(self.ps10_b); self.ps11_l.append(self.ps11_b); self.ps12_l.append(self.ps12_b); self.ps13_l.append(self.ps13_b)
                self.ps20_l.append(self.ps20_b); self.ps21_l.append(self.ps21_b); self.ps22_l.append(self.ps22_b); self.ps23_l.append(self.ps23_b)
                # collected all ps3 and output of patches as a (b) tensor, append into list
                self.ps3_l.append(torch.cat(self.ps3_b, dim=0)); # a list of tensors
                # self.output_l.append(torch.cat(self.output_b, dim=0)) # a list of 36, 7, h, w tensors
                # reset the image buffers for the next image
                self.c2_b = None; self.c3_b = None; self.c4_b = None; self.c5_b = None;
                self.ps00_b = None; self.ps01_b = None; self.ps02_b = None; self.ps03_b = None;
                self.ps10_b = None; self.ps11_b = None; self.ps12_b = None; self.ps13_b = None;
                self.ps20_b = None; self.ps21_b = None; self.ps22_b = None; self.ps23_b = None;
                self.ps3_b = []# ; self.output_b = []
            # batch complete: concatenate per-image lists into batch tensors
            if len(self.c2_l) == batch_size:
                self.c2_l = torch.cat(self.c2_l, dim=0)# .cuda()
                self.c3_l = torch.cat(self.c3_l, dim=0)# .cuda()
                self.c4_l = torch.cat(self.c4_l, dim=0)# .cuda()
                self.c5_l = torch.cat(self.c5_l, dim=0)# .cuda()
                self.ps00_l = torch.cat(self.ps00_l, dim=0)# .cuda()
                self.ps01_l = torch.cat(self.ps01_l, dim=0)# .cuda()
                self.ps02_l = torch.cat(self.ps02_l, dim=0)# .cuda()
                self.ps03_l = torch.cat(self.ps03_l, dim=0)# .cuda()
                self.ps10_l = torch.cat(self.ps10_l, dim=0)# .cuda()
                self.ps11_l = torch.cat(self.ps11_l, dim=0)# .cuda()
                self.ps12_l = torch.cat(self.ps12_l, dim=0)# .cuda()
                self.ps13_l = torch.cat(self.ps13_l, dim=0)# .cuda()
                self.ps20_l = torch.cat(self.ps20_l, dim=0)# .cuda()
                self.ps21_l = torch.cat(self.ps21_l, dim=0)# .cuda()
                self.ps22_l = torch.cat(self.ps22_l, dim=0)# .cuda()
                self.ps23_l = torch.cat(self.ps23_l, dim=0)# .cuda()
                self.ps0_l = [self.ps00_l, self.ps01_l, self.ps02_l, self.ps03_l]
                self.ps1_l = [self.ps10_l, self.ps11_l, self.ps12_l, self.ps13_l]
                self.ps2_l = [self.ps20_l, self.ps21_l, self.ps22_l, self.ps23_l]
                # self.ps3_l = torch.cat(self.ps3_l, dim=0)# .cuda()
            return self.ps3_l, output# self.output_l
    def forward(self, image_global, patches, top_lefts, ratio, mode=1, global_model=None, n_patch=None):
        """Run one of the three training modes.

        mode 1: train the global branch alone on the downsampled image.
        mode 2: train the local branch on patches, with frozen global
            features cropped into each patch (global-to-local), plus an
            ensemble output and an MSE consistency loss.
        mode 3 (else): train the global branch with the previously collected
            local feature maps merged in (local-to-global); requires a prior
            pass through collect_local_fm().
        """
        if mode == 1:
            # train global model
            c2_g, c3_g, c4_g, c5_g = self.resnet_global.forward(image_global)
            output_g, ps0_g, ps1_g, ps2_g, ps3_g = self.fpn_global.forward(c2_g, c3_g, c4_g, c5_g)
            # imsize = image_global.size()[2:]
            # output_g = F.interpolate(output_g, imsize, mode='nearest')
            return output_g, None
        elif mode == 2:
            # train global2local model
            with torch.no_grad():
                if self.patch_n == 0:
                    # calculate global images only if patches belong to a new set of global images (when self.patch_n % n_patch == 0)
                    self.c2_g, self.c3_g, self.c4_g, self.c5_g = self.resnet_global.forward(image_global)
                    self.output_g, self.ps0_g, self.ps1_g, self.ps2_g, self.ps3_g = self.fpn_global.forward(self.c2_g, self.c3_g, self.c4_g, self.c5_g)
                    # imsize_glb = image_global.size()[2:]
                    # self.output_g = F.interpolate(self.output_g, imsize_glb, mode='nearest')
                self.patch_n += patches.size()[0]
                self.patch_n %= n_patch
            # train local model #######################################
            c2_l, c3_l, c4_l, c5_l = self.resnet_local.forward(patches)
            # global's 1x patch cat
            output_l, ps0_l, ps1_l, ps2_l, ps3_l = self.fpn_local.forward(c2_l, c3_l, c4_l, c5_l,
                self._crop_global(self.c2_g, top_lefts, ratio),
                self._crop_global(self.c3_g, top_lefts, ratio),
                self._crop_global(self.c4_g, top_lefts, ratio),
                self._crop_global(self.c5_g, top_lefts, ratio),
                [ self._crop_global(f, top_lefts, ratio) for f in self.ps0_g ],
                [ self._crop_global(f, top_lefts, ratio) for f in self.ps1_g ],
                [ self._crop_global(f, top_lefts, ratio) for f in self.ps2_g ]
            )
            # imsize = patches.size()[2:]
            # output_l = F.interpolate(output_l, imsize, mode='nearest')
            ps3_g2l = self._crop_global(self.ps3_g, top_lefts, ratio)[0] # only calculate loss on 1x
            ps3_g2l = F.interpolate(ps3_g2l, size=ps3_l.size()[2:], **self._up_kwargs)
            output = self.ensemble(ps3_l, ps3_g2l)
            # output = F.interpolate(output, imsize, mode='nearest')
            return output, self.output_g, output_l, self.mse(ps3_l, ps3_g2l)
        else:
            # train local2global model
            c2_g, c3_g, c4_g, c5_g = self.resnet_global.forward(image_global)
            # local patch cat into global
            output_g, ps0_g, ps1_g, ps2_g, ps3_g = self.fpn_global.forward(c2_g, c3_g, c4_g, c5_g, c2_ext=self.c2_l, c3_ext=self.c3_l, c4_ext=self.c4_l, c5_ext=self.c5_l, ps0_ext=self.ps0_l, ps1_ext=self.ps1_l, ps2_ext=self.ps2_l)
            # imsize = image_global.size()[2:]
            # output_g = F.interpolate(output_g, imsize, mode='nearest')
            self.clear_cache()
            return output_g, ps3_g
src/opnsense/scripts/filter/pftop.py | onedr0p/core | 2,109 | 12766350 | <filename>src/opnsense/scripts/filter/pftop.py
#!/usr/local/bin/python3
"""
Copyright (c) 2021 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import ujson
import argparse
from lib.states import query_top
if __name__ == '__main__':
    # read the command line options (all optional, all default to '')
    parser = argparse.ArgumentParser()
    for opt, descr in (
        ('--filter', 'filter results'),
        ('--limit', 'limit number of results'),
        ('--offset', 'offset results'),
        ('--label', 'label / rule id'),
        ('--sort_by', 'sort by (field asc|desc)'),
    ):
        parser.add_argument(opt, help=descr, default='')
    args = parser.parse_args()
    # fetch state table entries, filtered server-side by text and rule label
    result = {
        'details': query_top(filter_str=args.filter, rule_label=args.label)
    }
    # optional sort, spec format: "<field> [asc|desc]"
    records = result['details']
    if args.sort_by.strip() and records:
        tokens = args.sort_by.split()
        field = tokens[0]
        descending = tokens[-1] == 'desc'
        if field in records[0]:
            # pick a key function matching the field's type (sampled from the
            # first record); rows missing the field sort as 0 / empty string
            if type(records[0][field]) is int:
                def sorter(row):
                    return row[field] if field in row else 0
            else:
                def sorter(row):
                    return str(row[field]).lower() if field in row else ''
            result['details'] = sorted(records, key=sorter, reverse=descending)
    # total after filtering, before pagination
    result['total_entries'] = len(result['details'])
    # pagination: apply offset first, then limit
    if args.offset.isdigit():
        result['details'] = result['details'][int(args.offset):]
    if args.limit.isdigit() and len(result['details']) >= int(args.limit):
        result['details'] = result['details'][:int(args.limit)]
    result['total'] = len(result['details'])
    print(ujson.dumps(result))
| 1.59375 | 2 |